I followed the example here, but instead of feeding multiple words, I feed multiple images into the network; however, the results are not as expected (it works for a single input image).
I am using the model from deepinsight (resnet100).
You can find the source code on GitHub; the main points are:
A. If the batch size is greater than 1, add a "data1" argument
args["data"] = NDArray(input_shape, context);
if(input_shape[0] > 1){
args["data1"] = NDArray(Shape(1), context, false);
}
B. Copy the contents of multiple images into a std::vector&lt;float&gt;
/// Flattens a batch of dlib RGB images into one contiguous float buffer in
/// NCHW order (image, channel, row, col) — the planar layout expected when
/// the buffer is copied into an MXNet "data" NDArray for a batched forward.
///
/// @param rgb_image non-owning pointers to the images to pack; the images
///        may have different dimensions (each contributes rows*cols*3
///        floats to the output).
/// @return the packed pixel values; empty if rgb_image is empty.
std::vector<float> dlib_matrix_to_float_array(std::vector<dlib::matrix<dlib::rgb_pixel> const*> const &rgb_image)
{
    // Size the buffer from the per-image sums instead of assuming every image
    // matches rgb_image[0]: this avoids an out-of-bounds write when the batch
    // mixes resolutions, and avoids dereferencing rgb_image[0] on an empty
    // batch (the original code did both).
    size_t total_size = 0;
    for(auto const *img : rgb_image){
        total_size += static_cast<size_t>(img->nr() * img->nc()) * 3;
    }
    std::vector<float> image_vector(total_size);

    size_t index = 0;
    for(size_t i = 0; i != rgb_image.size(); ++i){
        auto const &img = *rgb_image[i];
        // Channel-planar (CHW) per image: all red values, then green, then blue.
        for(size_t ch = 0; ch != 3; ++ch){
            for(long row = 0; row != img.nr(); ++row){
                for(long col = 0; col != img.nc(); ++col){
                    auto const &pix = img(row, col);
                    image_vector[index++] =
                        ch == 0 ? pix.red : (ch == 1 ? pix.green : pix.blue);
                }
            }
        }
    }
    return image_vector;
}
C. Copy the data into the executor and run the forward pass
//input is the std::vector with data of the images
executor_->arg_dict()["data"].SyncCopyFromCPU(input.data(), input.size());
executor_->arg_dict()["data1"] = batch_size;
executor_->Forward(false);
D. Split the output NDArray into multiple NDArrays
auto features = executor_->outputs[0].Copy(Context(kCPU, 0));
Shape const shape(1, step_per_feature);
features.WaitToRead();
for(size_t i = 0; i != batch_size; ++i){
NDArray feature(features.GetData() + i * 512, shape, Context(kCPU, 0));
//following solution do not work either
/*std::vector<float> buffer(512);
for(size_t j = 0; j != 512; ++j){
buffer[j] = features.At(i, j);
}//*/
NDArray feature(&buffer[0], shape, Context(kCPU, 0));
//problem is, the feature extract is not correct
result.emplace_back(std::move(feature));
}
What kind of errors am I committing? Thanks.
P.S.: Do I need to re-export the model and change the batch size too?