I’m trying to use BilinearResize2D, here is my code sample:
class DecoderBlock(nn.HybridBlock):
    """Decoder stage: relu -> 1x1 conv -> 2x bilinear upsample -> batchnorm.

    This is the original (problematic) sample the question is about.
    """

    def __init__(self, out_channels, in_channels):
        super(DecoderBlock, self).__init__()
        with self.name_scope():
            self.de_relu = nn.Activation(activation='relu')
            self.de_1_by_b_conv = nn.Conv2D(channels=out_channels, kernel_size=1, strides=1, padding=0)
            self.de_norm = nn.BatchNorm(momentum=0.1, in_channels=out_channels)

    def hybrid_forward(self, F, x):
        out = self.de_relu(x)
        out = self.de_1_by_b_conv(out)
        # NOTE(review): calling the imperative mx.nd.contrib API and reading
        # x.shape at runtime both break hybridization (symbols have no shape);
        # this is the source of the error reported below.
        out = mx.nd.contrib.BilinearResize2D(out,
                                             out_height=x.shape[2] * 2,
                                             out_width=x.shape[3] * 2)
        out = self.de_norm(out)
        return out
I get the following error:
File "/home/ubuntu/anaconda3/envs/mxnet_p27/lib/python2.7/site-packages/mxnet/gluon/parameter.py", line 297, in _init_impl
self._data = [data.copyto(ctx) for ctx in self._ctx_list]
File "/home/ubuntu/anaconda3/envs/mxnet_p27/lib/python2.7/site-packages/mxnet/ndarray/ndarray.py", line 2053, in copyto
return _internal._copyto(self, out=hret)
File "<string>", line 25, in _copyto
File "/home/ubuntu/anaconda3/envs/mxnet_p27/lib/python2.7/site-packages/mxnet/_ctypes/ndarray.py", line 92, in _imperative_invoke
ctypes.byref(out_stypes)))
File "/home/ubuntu/anaconda3/envs/mxnet_p27/lib/python2.7/site-packages/mxnet/base.py", line 235, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: [20:48:10] src/ndarray/ndarray.cc:1233: GPU is not enabled
You can try the following code and let me know if it works for you. I have checked it on both CPU and GPU, with hybridization enabled and disabled, on MXNet 1.2.1.
Please, notice, that I:
Had to pass dimensions into the block as parameters, so hybridization would work
As @ehsanmok mentioned before, you need to use F.contrib for hybridization to work
The in_channels argument of the block is actually not used (it was unused in your example as well)
import mxnet as mx
from mxnet.gluon import nn
class DecoderBlock(nn.HybridBlock):
    """Decoder stage: relu -> 1x1 conv -> fixed 2x bilinear upsample -> batchnorm.

    The input height/width are supplied at construction time so the block can
    be hybridized: in symbolic mode the tensor shape is not available at
    runtime, so the target size must be known up front.
    """

    def __init__(self, height, width, out_channels, in_channels, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        self._height = height
        self._width = width
        with self.name_scope():
            self.de_relu = nn.Activation(activation='relu')
            self.de_1_by_b_conv = nn.Conv2D(channels=out_channels, kernel_size=1, strides=1, padding=0)
            self.de_norm = nn.BatchNorm(momentum=0.1, in_channels=out_channels)

    def hybrid_forward(self, F, x, *args, **kwargs):
        out = self.de_relu(x)
        out = self.de_1_by_b_conv(out)
        # Use F.contrib (not mx.nd.contrib) so the op works in both imperative
        # and symbolic (hybridized) modes.
        out = F.contrib.BilinearResize2D(out, height=self._height * 2, width=self._width * 2)
        return self.de_norm(out)
ctx = mx.cpu()
# Input layout is NCHW: batch size x number of filters x height x width.
data = mx.random.uniform(shape=(5, 50, 400, 400), ctx=ctx)

decoder = DecoderBlock(400, 400, 3, 3)
decoder.initialize(ctx=ctx)
decoder.hybridize()

output = decoder(data)
print(output)