MXNet Forum

Storing model and parameters from python and loading in c_api fails to infer shape


I can’t seem to figure out how to load, via the C API, a model I stored from an IPython notebook. I don’t know whether the issue happens on the storing or the loading side.
I always end up with:

[18:50:04] src/nnvm/legacy_json_util.cc: Loading symbol saved by previous version v1.2.0. Attempting to upgrade...
[18:50:04] src/nnvm/legacy_json_util.cc: Symbol successfully upgraded!
[18:50:04] src/c_api/c_predict_api.cc: Check failed: infer_complete The shape information of is not enough to get the shapes

(Note: the forum stripped the source filenames from the log lines; they are restored above from the MXNet sources that emit these messages.)

The version part confuses me a bit because even after making sure I use 1.2.1 everywhere (from pip in the ipython and compiled from 1.2.1 git sources on the c_api side) I still get that message? But the real issue appears to be the inability to infer the shapes. This also happened with more recent versions of the git sources.

I initially also tried to use Gluon but couldn’t figure out how to name inputs and outputs explicitly, which matters because my use case will eventually require multiple inputs and outputs.

I cut down my code to this:

import mxnet
from mxnet import nd, init, autograd


input_data = {
    "scalar": nd.zeros((1024,16,1)),
    "matrix": nd.zeros((1024,8,137)),
label_data = {
    "flux_label": nd.zeros((1024,10,138)),
dataiter =, label_data, batch_size=128, shuffle=False, last_batch_handle='discard', data_name=['scalar','matrix'], label_name=['flux_label'])

def network():
    in_scalar = mxnet.symbol.Variable('scalar')
    in_matrix = mxnet.symbol.Variable('matrix')
    bcast_scalar = mxnet.symbol.broadcast_to(in_scalar, shape=(0,0,137))
    inputs = mxnet.symbol.concat(in_matrix, bcast_scalar, dim=1)
    tmp = mxnet.symbol.BatchNorm(inputs)
    tmp = mxnet.symbol.FullyConnected(data=tmp, num_hidden=10*138)
    flux = tmp.reshape((0,10,138), name='fluxes')
    return flux

flux = network()
label = mxnet.symbol.Variable('flux_label')
loss = mxnet.symbol.LinearRegressionOutput(flux, label)

mod = mxnet.mod.Module(symbol=loss, context=mxnet.cpu(0), data_names=['scalar', 'matrix'], label_names=['flux_label'])

mod.bind(data_shapes=dataiter.provide_data, label_shapes=dataiter.provide_label)

for epoch in range(1):
    for batch in dataiter:
        mod.forward(batch, is_train=True)

and this:

#include <stdio.h>
#include <stdlib.h>
#include <mxnet/c_predict_api.h>
#include <mxnet/c_api.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

/*
 * Read the entire file `name` into a freshly malloc'd, NUL-terminated
 * buffer.  On success the caller owns the returned buffer and must
 * free() it; *size (if non-NULL) receives the file length in bytes.
 * Returns NULL on open failure, allocation failure, or short read.
 *
 * Fixes vs. the posted version: the closing braces lost in the forum
 * paste are restored, the file descriptor is closed on every path
 * (it was leaked), and malloc()/read() results are checked.
 */
char* read_whole_file(const char *name, int *size) {
	struct stat s;
	int fd = open(name, O_RDONLY);
	if (fd < 0) {
		return NULL;
	}
	fstat(fd, &s);
	if (size != NULL) {
		*size = s.st_size;
	}
	char *data = malloc(s.st_size + 1);
	if (data == NULL || read(fd, data, s.st_size) != (ssize_t)s.st_size) {
		/* OOM or short read: don't hand back a half-filled buffer. */
		free(data);
		close(fd);
		return NULL;
	}
	close(fd);
	data[s.st_size] = '\0';
	return data;
}
/*
 * Load a model JSON + params file (argv[1], argv[2]) and create a
 * partial-output predictor for the "fluxes" output.
 *
 * Fixes vs. the posted version: closing braces lost in the forum paste
 * restored; the MXGetVersion() call the paste dropped is restored
 * (version was printed uninitialized); the truncated
 * MXPredCreatePartialOut argument list is reconstructed against its
 * 12-argument signature; input_shape_indptr uses cumulative offsets
 * {0, 3, 6} (the root cause identified at the end of the post — it must
 * index into input_shape_data, not hold per-input sizes); an argc guard
 * and NULL checks on the file reads are added; resources are released.
 */
int main(int argc, char *argv[]) {
	if (argc < 3) {
		fprintf(stderr, "usage: %s model.json model.params\n", argv[0]);
		return 1;
	}

	int json_size;
	char *json = read_whole_file(argv[1], &json_size);
	if (json == NULL) {
		fprintf(stderr, "failed to read %s\n", argv[1]);
		return 1;
	}
	printf("loaded json file with %d bytes\n", json_size);

	int param_size;
	char *params = read_whole_file(argv[2], &param_size);
	if (params == NULL) {
		fprintf(stderr, "failed to read %s\n", argv[2]);
		free(json);
		return 1;
	}
	printf("loaded param file with %d bytes\n", param_size);

	int version;
	MXGetVersion(&version);
	printf("%d\n", version);

	const char *input_keys[] = {"scalar", "matrix"};
	/* Cumulative offsets into input_shape_data: input i spans
	 * [indptr[i], indptr[i+1]).  Both inputs here are rank 3. */
	const mx_uint input_shape_indptr[] = {0, 3, 6};
	const mx_uint input_shape_data[] = {128, 16, 1, 128, 8, 137};

	const char *output_keys[] = {"fluxes"};

	PredictorHandle p_handle;
	int err = MXPredCreatePartialOut(json,
	                                 params, param_size,
	                                 1, 0,          /* dev_type=cpu, dev_id=0 */
	                                 2, input_keys,
	                                 input_shape_indptr, input_shape_data,
	                                 1, output_keys,
	                                 &p_handle);
	if (err != 0) {
		printf("%s\n", MXGetLastError());
	} else {
		MXPredFree(p_handle);
	}

	free(params);
	free(json);
	return err != 0;
}

So it turns out the mistake was that `input_shape_indptr` must contain cumulative offsets into `input_shape_data` rather than the sizes of the individual input shapes — i.e. {0,3,6} instead of {0,3,3}. With that fixed, my test case has now graduated to a (hopefully unrelated) segfault.