onnx模型推理

为了方便使用,这里自定义了配置的消息文件,没有使用json,主要是因为json解析bool型的值时会解析为string,需要手动转换,相对比较麻烦,protobuf的扩展性更好,使用protobuf作为配置文件。

syntax = "proto3";
package inference;
// Configuration message for ONNX model inference.
message DataConfig {
  // Directory containing the ONNX model file.
  string modelPaths = 1;
  // Directory containing the test images.
  string testDataPath = 2;
  // File name of the ONNX model (joined with modelPaths).
  string modelName = 3;
  // Image file names to run inference on.
  repeated string filenames = 4;
}

protoc model_config.proto --python_out=.

OpenCV推理

Onnxruntime推理

def inference_with_onnx(config):
    """Run single-image classification with an ONNX model via onnxruntime.

    Args:
        config: mapping matching the DataConfig protobuf message, with keys
            'modelPaths' (model directory), 'testDataPath' (test-image
            directory), 'modelName' (model file name) and 'filenames'
            (list of image file names).

    Raises:
        FileNotFoundError: if the test image cannot be read.
    """
    onnx_model = os.path.join(config['modelPaths'], config['modelName'])
    session = onnxruntime.InferenceSession(onnx_model, providers=['CPUExecutionProvider'])
    output_names = [node.name for node in session.get_outputs()]
    input_tensor = session.get_inputs()
    # Bug fix: the test image lives under testDataPath (see DataConfig),
    # not under the model directory.
    image_path = os.path.join(config['testDataPath'], config['filenames'][0])
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread silently returns None on a missing/unreadable file;
        # fail fast with a clear message instead of a later OpenCV error.
        raise FileNotFoundError("Cannot read image: {}".format(image_path))
    # NOTE(review): blobFromImage subtracts `mean` BEFORE applying
    # scalefactor; these mean values are in the 0-1 range while pixels are
    # 0-255, so the subtraction is nearly a no-op and no std division is
    # applied. This looks like an incomplete ImageNet normalization —
    # confirm against the preprocessing the model was exported with.
    data = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(224, 224),
                                 mean=[0.485, 0.456, 0.406], swapRB=True,
                                 crop=False)
    output_result = session.run(output_names, input_feed={input_tensor[0].name: data})
    print("Class:{}".format(np.argmax(output_result[0])))