How to use mx.rnn.ConvGRUCell()


#1

Does anyone know much about ConvRNN?
I need to use ConvGRU(symbol interface) to complete a task. But I just learn to use contrib.rnn.Conv2DGRUCell . How can I use the interface mx.rnn.ConvGRUCell()(code)?
my code detail:

def data_process(batch_size):
train_lst = ‘/home/feywell/demo/train.lst’
val_lst = ‘/home/feywell/demo/val.lst’
train_data = ImageSeqIter(
path_imglist=train_lst,
data_shape=(3,512,512),
label_shape=(3,512,512),
resize=512,
mean=np.array([[0.4914],[0.4822],[0.4465]]),
std=np.array([[0.2023],[0.1994],[0.2010]]),
data_name=‘data’,
label_name=‘label’,
batch_size=batch_size,
rand_crop = False,
rand_mirror = False,
)

valid_data = ImageSeqIter(
path_imglist=val_lst,
data_shape=(3,512,512),
label_shape=(3,512,512),
resize=512,
mean=np.array([[0.4914],[0.4822],[0.4465]]),
std=np.array([[0.2023],[0.1994],[0.2010]]),
data_name=‘data’,
label_name=‘label’,
batch_size=batch_size,
rand_crop = False,
rand_mirror = False
)

return train_data,valid_data
train_loader,val_loader = data_process(4)

test net

input_shape = (4,3,512,512)
ctx = mx.gpu()
data = mx.sym.Variable(‘data’)
states = mx.sym.Variable(‘states’)

net = ConvGRUCell(input_shape=input_shape, num_hidden=12,i2h_kernel=(3,3), h2h_kernel=(3,3),i2h_pad=(1,1))
print(net)
output,states = net(data,states)
print(output)
print(output.list_arguments())
print(states)
model = mx.mod.Module(symbol=output, context=ctx,label_names=None)

model.fit(train_loader, # train data
eval_data=val_loader, # validation data
optimizer=‘sgd’, # use SGD to train
optimizer_params={‘learning_rate’:0.1}, # use fixed learning rate
eval_metric=‘acc’, # report accuracy during training
batch_end_callback = mx.callback.Speedometer(4, 100), # output progress for each 100 data batches
num_epoch=10) # train for at most 10 dataset passes

error like following:

<main.ConvGRUCell object at 0x2b122c5bbfd0>

[‘data’, ‘ConvGRU_i2h_weight’, ‘ConvGRU_i2h_bias’, ‘states’, ‘ConvGRU_h2h_weight’, ‘ConvGRU_h2h_bias’]
[]

RuntimeErrorTraceback (most recent call last)
in ()
27 eval_metric=‘acc’, # report accuracy during training
28 batch_end_callback = mx.callback.Speedometer(4, 100), # output progress for each 100 data batches
—> 29 num_epoch=10) # train for at most 10 dataset passes
30 # model = mx.mod.Module(output, data_names=[‘data’,], label_names=None, context=mx.gpu())
31 print(model)

/anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/module/base_module.pyc in fit(self, train_data, eval_data, eval_metric, epoch_end_callback, batch_end_callback, kvstore, optimizer, optimizer_params, eval_end_callback, eval_batch_end_callback, initializer, arg_params, aux_params, allow_missing, force_rebind, force_init, begin_epoch, num_epoch, validation_metric, monitor)
458
459 self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
–> 460 for_training=True, force_rebind=force_rebind)
461 if monitor is not None:
462 self.install_monitor(monitor)

anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/module/module.pyc in bind(self, data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module, grad_req)
427 fixed_param_names=self._fixed_param_names,
428 grad_req=grad_req, group2ctxs=self._group2ctxs,
–> 429 state_names=self._state_names)
430 self._total_exec_bytes = self._exec_group._total_exec_bytes
431 if shared_module is not None:
/anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/module/executor_group.pyc in init(self, symbol, contexts, workload, data_shapes, label_shapes, param_names, for_training, inputs_need_grad, shared_group, logger, fixed_param_names, grad_req, state_names, group2ctxs)
262 self.num_outputs = len(self.symbol.list_outputs())
263
–> 264 self.bind_exec(data_shapes, label_shapes, shared_group)
265
266 def decide_slices(self, data_shapes):

/anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/module/executor_group.pyc in bind_exec(self, data_shapes, label_shapes, shared_group, reshape)
358 else:
359 self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
–> 360 shared_group))
361
362 self.data_shapes = data_shapes
/anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/module/executor_group.pyc in _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group)
636 type_dict=input_types, shared_arg_names=self.param_names,
637 shared_exec=shared_exec, group2ctx=group2ctx,
–> 638 shared_buffer=shared_data_arrays, **input_shapes)
639 self._total_exec_bytes += int(executor.debug_str().split(’\n’)[-3].split()[1])
640 return executor

anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/symbol/symbol.pyc in simple_bind(self, ctx, grad_req, type_dict, stype_dict, group2ctx, shared_arg_names, shared_exec, shared_buffer, **kwargs)
1513 error_msg += “%s: %s\n” % (k, v)
1514 error_msg += “%s” % e
-> 1515 raise RuntimeError(error_msg)
1516
1517 # update shared_buffer

RuntimeError: simple_bind error. Arguments:
data: (4, 3, 512, 512)
label: (4, 3, 512, 512)
Error in operator ConvGRU_t0_h2h: [20:55:03] src/operator/nn/./convolution-inl.h:625: Check failed: dtype != -1 (-1 vs. -1) First input must have specified type

Stack trace returned 10 entries:
[bt] (0) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(_ZN4dmlc10StackTraceB5cxx11Ev+0x48) [0x2b11487cbc68]
[bt] (1) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(_ZN4dmlc15LogMessageFatalD1Ev+0x18) [0x2b11487cc678]
[bt] (2) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(ZNK5mxnet2op15ConvolutionProp9InferTypeEPSt6vectorIiSaIiEES5_S5+0x990) [0x2b1148953ab0]
[bt] (3) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(+0x2ed4735) [0x2b114aed4735]
[bt] (4) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(+0x2ccacf8) [0x2b114accacf8]
[bt] (5)/anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(+0x2cd1d61) [0x2b114acd1d61]
[bt] (6) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(_ZN5mxnet4exec9InferTypeEON4nnvm5GraphEOSt6vectorIiSaIiEERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE+0x11f) [0x2b114acd2bcf]
[bt] (7) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(_ZN5mxnet4exec13GraphExecutor4InitEN4nnvm6SymbolERKNS_7ContextERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES4_St4lessISD_ESaISt4pairIKSD_S4_EEERKSt6vectorIS4_SaIS4_EESR_SR_RKSt13unordered_mapISD_NS2_6TShapeESt4hashISD_ESt8equal_toISD_ESaISG_ISH_ST_EEERKSS_ISD_iSV_SX_SaISG_ISH_iEEES17_RKSN_INS_9OpReqTypeESaIS18_EERKSt13unordered_setISD_SV_SX_SaISD_EEPSN_INS_7NDArrayESaIS1I_EES1L_S1L_PSS_ISD_S1I_SV_SX_SaISG_ISH_S1I_EEEPNS_8ExecutorERKSS_INS2_9NodeEntryES1I_NS2_13NodeEntryHashENS2_14NodeEntryEqualESaISG_IKS1S_S1I_EEE+0x7d5) [0x2b114acb68a5]
[bt] (8) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.so(ZN5mxnet8Executor10SimpleBindEN4nnvm6SymbolERKNS_7ContextERKSt3mapINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES3_St4lessISC_ESaISt4pairIKSC_S3_EEERKSt6vectorIS3_SaIS3_EESQ_SQ_RKSt13unordered_mapISC_NS1_6TShapeESt4hashISC_ESt8equal_toISC_ESaISF_ISG_SS_EEERKSR_ISC_iSU_SW_SaISF_ISG_iEEES16_RKSM_INS_9OpReqTypeESaIS17_EERKSt13unordered_setISC_SU_SW_SaISC_EEPSM_INS_7NDArrayESaIS1H_EES1K_S1K_PSR_ISC_S1H_SU_SW_SaISF_ISG_S1H_EEEPS0+0xcd) [0x2b114acb714d]
[bt] (9) /anaconda2/lib/python2.7/site-packages/mxnet-1.0.0-py2.7.egg/mxnet/libmxnet.s

Thanks!


#2

Hi @Feywell,

So looking at the stack trace, it looks like something might be wrong with the data types.

Can you check the output of your ImageSeqIters to ensure they are returning batches of dtype float32.

And could you please clarify which ConvGRUCell cell you are using? I don’t see any imports in your code snippet. Are you using from mxnet.rnn import ConvGRUCell?


#3

@thomelane Thanks!
I check the datatype, it is the same type ‘float32’.
But there is the same error here.
I doubt that I did not initialize the first hidden states? Is that the key to the error?
And I do import ConvGRUCell from mxnet.rnn.
Can you help me?

There is my implementation details of ImageSeqIters

class ImageSeqIter(mx.io.DataIter):

 def __init__(self,  batch_size, data_shape, label_shape, data_name='data',path_imglist=None,
             label_name='label', imglist=None,aug_list=None, **kwargs):
    super(ImageSeqIter,self).__init__()
    assert path_imglist or (isinstance(imglist, list))
    
    class_name = self.__class__.__name__
    if path_imglist:
        logging.info('%s: loading image list %s...', class_name, path_imglist)
        with open(path_imglist) as fin:
            imglist = {}
            imgkeys = []
            for line in iter(fin.readline, ''):
                line = line.strip().split('\t')
                label = mx.nd.array([float(i) for i in line[1:-1]])
                key = int(line[0])
                imglist[key] = (label, line[-1])
                imgkeys.append(key)
            self.imglist = imglist
    elif isinstance(imglist, list):
        logging.info('%s: loading image list...', class_name)
        result = {}
        imgkeys = []
        index = 1
        for img in imglist:
            key = str(index)  # pylint: disable=redefined-variable-type
            index += 1
            if len(img) > 2:
                label = nd.array(img[:-1])
            elif isinstance(img[0], numeric_types):
                label = nd.array([img[0]])
            else:
                label = nd.array(img[0])
            result[key] = (label, img[-1])
            imgkeys.append(str(key))
        self.imglist = result
    else:
        self.imglist = None
    
    self.check_data_shape(data_shape)
    self.provide_data = [(data_name, (batch_size,) + data_shape)]
    self.provide_label = [(label_name, (batch_size,) + data_shape)]
    self.batch_size = batch_size
    
    if aug_list is None:
        self.auglist = mx.image.CreateAugmenter(data_shape, **kwargs)
    else:
        self.auglist = aug_list

    self.seq = imgkeys
    self.cur = 0
    self.reset()

def reset(self):
    self.cur = 0
    
def next_sample(self):
    """Helper function for reading in next sample."""
    if self.seq is not None:
        if self.cur+1>= len(self.seq):
            raise StopIteration
        idx = self.seq[self.cur]
        self.cur += 1        
   
        _,fname = self.imglist[idx]
        _,nextname = self.imglist[idx+1]
        return self.read_image(fname),self.read_image(nextname)

def next(self):
    """Returns the next batch of data."""
    batch_size = self.batch_size
    c, h, w = self.data_shape
    batch_data = nd.empty((batch_size, c, h, w))
    batch_label = nd.empty((batch_size,c,h,w))
    i = 0
    try:
        while i < batch_size:
            s,l = self.next_sample()
            data = self.imdecode(s)
            label = self.imdecode(l)     

            try:
                self.check_valid_image(data)
            except RuntimeError as e:
                logging.debug('Invalid image, skipping:  %s', str(e))
                continue
            data = self.augmentation_transform(data)
            assert i < batch_size, 'Batch size must be multiples of augmenter output length'
            batch_data[i] = self.postprocess_data(data)
            batch_label[i] = self.postprocess_data(label)
            i += 1
    except StopIteration:
        if not i:
            raise StopIteration

    return io.DataBatch([batch_data], [batch_label], batch_size - i)
    
def check_data_shape(self, data_shape):
    """Checks if the input data shape is valid"""
    if not len(data_shape) == 3:
        raise ValueError('data_shape should have length 3, with dimensions CxHxW')
    if not data_shape[0] == 3:
        raise ValueError('This iterator expects inputs to have 3 channels.')

def check_valid_image(self, data):
    """Checks if the input data is valid"""
    if len(data[0].shape) == 0:
        raise RuntimeError('Data shape is wrong')

def imdecode(self, s):
    """Decodes a string or byte string to an NDArray.
    See mx.img.imdecode for more details."""
    def locate():
        """Locate the image file/index if decode fails."""
        if self.seq is not None:
            idx = self.seq[self.cur - 1]
        else:
            idx = self.cur - 1
        if self.imglist is not None:
            _, fname = self.imglist[idx]
            msg = "filename: {}".format(fname)
        else:
            msg = "index: {}".format(idx)
        return "Broken image " + msg
    try:
        img = mx.image.imdecode(s)
    except Exception as e:
        raise RuntimeError("{}, {}".format(locate(), e))
    return img

def read_image(self, fname):
    """Reads an input image `fname` and returns the decoded raw bytes.
    Example usage:
    ----------
    >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
    """
    with open(os.path.join(self.path_root, fname), 'rb') as fin:
        img = fin.read()
    return img 

def augmentation_transform(self, data):
    """Transforms input data with specified augmentation."""
    for aug in self.auglist:
        data = aug(data)
    return data    

def postprocess_data(self, datum):
    """Final postprocessing step before image is loaded into the batch."""
    return nd.transpose(datum, axes=(2, 0, 1)).astype('float32') 

I just change the label shape from ImageIter. Because I suppose the label is the next frame data of current data.
Is it something wrong here?
Thank you ,again!


#4

You might also want to reference the Module API tutorial here as it shows how to bind and initialize your Module, both steps of which appear to be missing in your example.

mod = mx.mod.Module(out)
mod.bind(data_shapes=nd_iter.provide_data, label_shapes=nd_iter.provide_label) # create memory by given input shapes
mod.init_params()  # initial parameters with the default random initializer
mod.fit(nd_iter, num_epoch=10, ...)

Also, I can’t find much information specifically regarding mx.rnn.ConvGRUCell either, but one example usage can be found in the test file tests/python/unittest/test_rnn.py in function called test_convgru. It might be a good idea to work with a more simple model first as a baseline (e.g. mx.rnn.RNNCell) and then add convolution (e.g. mx.rnn.ConvRNNCell), before full model.


#5

Thanks! Actually,I find code here in function fit(). So I think it is not necessary to use bind() and init_params() explicitly. I’m not sure whether it is right.
And, thanks for you to help me find the test code. Is it said we need feed the data as list and the data shape just one of them?
Now, I change some code. The error messages like follows:

['data', 'ConvGRU_i2h_weight', 'ConvGRU_i2h_bias', 'states', 'ConvGRU_h2h_weight', 'ConvGRU_h2h_bias'] [] DataBatch: data shapes: [(4, 3, 512, 512)] label shapes: [(4, 3, 512, 512)]

/home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/base_module.py:66: Preformatted textUserWarning: Data provided by label_shapes don’t match names specified by label_names ([DataDesc[label,(4, 3, 512, 512),<class ‘numpy.float32’>,NCHW]] vs. [])

warnings.warn(msg)


MXNetError Traceback (most recent call last)
~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/symbol/symbol.py in simple_bind(self, ctx, grad_req, type_dict, stype_dict, group2ctx, shared_arg_names, shared_exec, shared_buffer, **kwargs)
1512 shared_exec_handle,
-> 1513 ctypes.byref(exe_handle)))
1514 except MXNetError as e:

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/base.py in check_call(ret)
148 if ret != 0:
–> 149 raise MXNetError(py_str(_LIB.MXGetLastError()))
150

MXNetError: Error in operator ConvGRU_t0_h2h: [20:43:15] src/operator/nn/convolution.cc:276: Check failed: dtype != -1 (-1 vs. -1) First input must have specified type

Stack trace returned 10 entries:
[bt] (0) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x303a0a) [0x2ba043051a0a]
[bt] (1) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x304031) [0x2ba043052031]
[bt] (2) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x51fe84) [0x2ba04326de84]
[bt] (3) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b6aae) [0x2ba045204aae]
[bt] (4) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24c09a7) [0x2ba04520e9a7]
[bt] (5) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24c140a) [0x2ba04520f40a]
[bt] (6) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b0d6e) [0x2ba0451fed6e]
[bt] (7) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b1744) [0x2ba0451ff744]
[bt] (8) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(MXExecutorSimpleBind+0x2378) [0x2ba04515e728]
[bt] (9) /home/liyang/anaconda2/envs/gluon/lib/python3.5/lib-dynload/_ctypes.cpython-35m-x86_64-linux-gnu.so(ffi_call_unix64+0x4c) [0x2ba01edc6540]

During handling of the above exception, another exception occurred:

RuntimeError Traceback (most recent call last)
in ()
30 eval_metric=‘acc’, # report accuracy during training
31 batch_end_callback = mx.callback.Speedometer(4, 100), # output progress for each 100 data batches
—> 32 num_epoch=10) # train for at most 10 dataset passes
33 # model = mx.mod.Module(output, data_names=[‘data’,], label_names=None, context=mx.gpu())
34 print(model)

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/base_module.py in fit(self, train_data, eval_data, eval_metric, epoch_end_callback, batch_end_callback, kvstore, optimizer, optimizer_params, eval_end_callback, eval_batch_end_callback, initializer, arg_params, aux_params, allow_missing, force_rebind, force_init, begin_epoch, num_epoch, validation_metric, monitor, sparse_row_id_fn)
482
483 self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
–> 484 for_training=True, force_rebind=force_rebind)
485 if monitor is not None:
486 self.install_monitor(monitor)

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/module.py in bind(self, data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module, grad_req)
428 fixed_param_names=self._fixed_param_names,
429 grad_req=grad_req, group2ctxs=self._group2ctxs,
–> 430 state_names=self._state_names)
431 self._total_exec_bytes = self._exec_group._total_exec_bytes
432 if shared_module is not None:

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/executor_group.py in init(self, symbol, contexts, workload, data_shapes, label_shapes, param_names, for_training, inputs_need_grad, shared_group, logger, fixed_param_names, grad_req, state_names, group2ctxs)
263 self.num_outputs = len(self.symbol.list_outputs())
264
–> 265 self.bind_exec(data_shapes, label_shapes, shared_group)
266
267 def decide_slices(self, data_shapes):

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/executor_group.py in bind_exec(self, data_shapes, label_shapes, shared_group, reshape)
359 else:
360 self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
–> 361 shared_group))
362
363 self.data_shapes = data_shapes

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/module/executor_group.py in _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group)
637 type_dict=input_types, shared_arg_names=self.param_names,
638 shared_exec=shared_exec, group2ctx=group2ctx,
–> 639 shared_buffer=shared_data_arrays, **input_shapes)
640 self._total_exec_bytes += int(executor.debug_str().split(’\n’)[-3].split()[1])
641 return executor

~/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/symbol/symbol.py in simple_bind(self, ctx, grad_req, type_dict, stype_dict, group2ctx, shared_arg_names, shared_exec, shared_buffer, **kwargs)
1517 error_msg += “%s: %s\n” % (k, v)
1518 error_msg += “%s” % e
-> 1519 raise RuntimeError(error_msg)
1520
1521 # update shared_buffer

RuntimeError: simple_bind error. Arguments:
data: (4, 3, 512, 512)
label: (4, 3, 512, 512)
Error in operator ConvGRU_t0_h2h: [20:43:15] src/operator/nn/convolution.cc:276: Check failed: dtype != -1 (-1 vs. -1) First input must have specified type

Stack trace returned 10 entries:
[bt] (0) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x303a0a) [0x2ba043051a0a]
[bt] (1) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x304031) [0x2ba043052031]
[bt] (2) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x51fe84) [0x2ba04326de84]
[bt] (3) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b6aae) [0x2ba045204aae]
[bt] (4) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24c09a7) [0x2ba04520e9a7]
[bt] (5) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24c140a) [0x2ba04520f40a]
[bt] (6) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b0d6e) [0x2ba0451fed6e]
[bt] (7) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(+0x24b1744) [0x2ba0451ff744]
[bt] (8) /home/liyang/anaconda2/envs/gluon/lib/python3.5/site-packages/mxnet/libmxnet.so(MXExecutorSimpleBind+0x2378) [0x2ba04515e728]
[bt] (9) /home/liyang/anaconda2/envs/gluon/lib/python3.5/lib-dynload/_ctypes.cpython-35m-x86_64-linux-gnu.so(ffi_call_unix64+0x4c) [0x2ba01edc6540]

Thanks again!