using OpenCvSharp.Dnn;
using OpenCvSharp;
using OpenVinoSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using OpenVinoSharp.preprocess;
using System.Diagnostics;
using OpenVinoSharp.Extensions;
using OpenVinoSharp.Extensions.model;
namespace DeepLearningDotNet
{
public class Predictor : IDisposable
{
/// <summary>
/// OpenVINO Runtime Core
/// </summary>
private Core core;
/// <summary>
/// Model read from file, before device compilation
/// </summary>
private Model model;
/// <summary>
/// Model loaded into the device
/// </summary>
private CompiledModel compiled;
/// <summary>
/// OpenVINO inference request
/// </summary>
public InferRequest openvino_api_infer;
/// <summary>
/// OpenCV DNN network
/// </summary>
public Net opencv_infer;
/// <summary>
/// Inference backend name: "OpenVINO" or "OpenCv"
/// </summary>
private string engine;
/// <summary>
/// Model input shape in NCHW order
/// </summary>
private int[] input_size;
public Predictor() { }
/// <summary>
/// Instantiate the predictor and build the inference pipeline.
/// </summary>
/// <param name="model_path">Path to the model file (e.g. ONNX)</param>
/// <param name="engine">Inference backend: "OpenVINO" or "OpenCv"</param>
/// <param name="device">OpenVINO target device, e.g. "CPU", "GPU" or "AUTO"</param>
/// <param name="input_size">Model input shape in NCHW order; defaults to {1, 3, 640, 640}</param>
/// <exception cref="ArgumentNullException">Thrown when model_path is null or empty</exception>
/// <exception cref="FileNotFoundException">Thrown when the model file does not exist</exception>
/// <exception cref="ArgumentException">Thrown when the requested device is unavailable</exception>
public Predictor(string model_path, string engine = "OpenVINO", string device = "AUTO", int[] input_size = null)
{
// Validate the model path
if (string.IsNullOrEmpty(model_path))
{
throw new ArgumentNullException(nameof(model_path));
}
if (!File.Exists(model_path))
{
throw new FileNotFoundException("Model file not found.", model_path);
}
this.engine = engine;
if (engine == "OpenVINO")
{
// -------- Step 1. Initialize OpenVINO Runtime Core --------
core = new Core();
// Verify that the requested device is available
// ("AUTO" is a virtual device and is not reported by get_available_devices())
if (device != "AUTO" && !core.get_available_devices().Contains(device))
{
throw new ArgumentException($"Device '{device}' is not available.", nameof(device));
}
// -------- Step 2. Read inference model --------
Model tempModel = core.read_model(model_path);
OvExtensions.printf_model_info(tempModel);
PrePostProcessor processor = new PrePostProcessor(tempModel);
this.input_size = input_size ?? new int[] { 1, 3, 640, 640 };
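// The runtime input tensor is U8 NHWC, so the stored NCHW dimensions are reordered to N, H, W, C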
Tensor input_tensor_pro = new Tensor(new OvType(ElementType.U8), new Shape(this.input_size[0], this.input_size[2], this.input_size[3], this.input_size[1]));
InputInfo input_info = processor.input(0);
InputTensorInfo input_tensor_info = input_info.tensor();
input_tensor_info.set_from(input_tensor_pro).set_layout(new Layout("NHWC")).set_color_format(ColorFormat.BGR);
PreProcessSteps process_steps = input_info.preprocess();
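// BGR -> RGB, linear resize to the model size, U8 -> F32, divide by 255, then NHWC -> NCHW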
process_steps.convert_color(ColorFormat.RGB).resize(ResizeAlgorithm.RESIZE_LINEAR)
.convert_element_type(new OvType(ElementType.F32)).scale(255.0f).convert_layout(new Layout("NCHW"));
model = processor.build();
// -------- Step 3. Loading a model to the device --------
compiled = core.compile_model(model, device);
// -------- Step 4. Create an infer request --------
openvino_api_infer = compiled.create_infer_request();
}
else if (engine == "OpenCv")
{
opencv_infer = CvDnn.ReadNetFromOnnx(model_path);
}
}
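/// <summary>
/// Minimal usage sketch for the OpenVINO backend. Illustrative only: the model
/// path, image path and 640x640 input size are hypothetical placeholders.
/// </summary>
public static List<float[]> ExampleOpenVinoUsage()
{
using (Predictor predictor = new Predictor("model.onnx", engine: "OpenVINO", device: "CPU"))
using (Mat frame = Cv2.ImRead("test.jpg"))
{
// The predictor only accepts images already resized so the longer side matches the model input
Cv2.Resize(frame, frame, new Size(640, 640));
return predictor.OpenVinoInfer(frame, null, new int[] { 1, 3, 640, 640 }, null, null);
}
}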
public void Dispose()
{
// Only one backend is ever created, so guard each handle with a null check
openvino_api_infer?.Dispose();
opencv_infer?.Dispose();
compiled?.Dispose();
model?.Dispose();
core?.Dispose();
GC.Collect();
}
/// <summary>
/// Run synchronous inference through the OpenVINO backend.
/// </summary>
/// <param name="img">Input image, already resized to the model input size</param>
/// <param name="input_names">Model input names (unused by this backend)</param>
/// <param name="input_size">Model input shape (unused by this backend)</param>
/// <param name="output_names">Model output names (unused by this backend)</param>
/// <param name="output_sizes">Model output shapes (unused by this backend)</param>
/// <returns>The flattened data of the first output tensor, or an empty list on failure</returns>
public List<float[]> OpenVinoInfer(Mat img, List<string> input_names, int[] input_size, List<string> output_names, List<int[]> output_sizes)
{
List<float[]> returns = new List<float[]>();
try
{
// -------- Step 6. Set up input data --------
if (set_input_tensor_data(img))
{
// -------- Step 7. Do inference synchronously --------
openvino_api_infer.infer();
// -------- Step 8. Get infer result data --------
Tensor output_tensor = openvino_api_infer.get_output_tensor();
int output_length = (int)output_tensor.get_size();
float[] output_data = output_tensor.get_data<float>(output_length);
returns.Add(output_data);
}
return returns;
}
catch { return returns; }
}
/// <summary>
/// Load image data into the inference request's input tensor.
/// Keep this method in this class: moving it into a derived class has been
/// observed to leak the unmanaged buffer.
/// </summary>
/// <param name="img">Input image, already resized to the model input size</param>
/// <returns>true if the data was loaded into the tensor, false otherwise</returns>
public bool set_input_tensor_data(Mat img)
{
try
{
// Reject images that have not yet been resized to the model input size
if (Math.Max(img.Size().Width, img.Size().Height) != input_size[2]
&& Math.Max(img.Size().Width, img.Size().Height) != input_size[3])
return false;
// Query the input tensor shape from the inference request (NHWC: 1 x H x W x C)
Tensor input_tensor = openvino_api_infer.get_input_tensor();
Shape input_shape = input_tensor.get_shape();
byte[] input_data = new byte[input_shape[1] * input_shape[2] * input_shape[3]];
// Copy the raw BGR bytes out of the Mat's unmanaged buffer
Marshal.Copy(img.Ptr(0), input_data, 0, input_data.Length);
// Copy those bytes into the tensor's unmanaged buffer
IntPtr destination = input_tensor.data();
Marshal.Copy(input_data, 0, destination, input_data.Length);
return true;
}
catch { return false; }
}
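/// <summary>
/// Preprocessing sketch (assumption: the model expects a square input such as
/// 640x640). set_input_tensor_data only checks the longer side, so a plain
/// linear resize is the minimal preparation; the OpenVINO pipeline configured
/// in the constructor handles color, element type and layout.
/// </summary>
public static Mat ResizeToModelInput(Mat src, int target_size)
{
Mat dst = new Mat();
// Linear interpolation matches the RESIZE_LINEAR step configured above
Cv2.Resize(src, dst, new Size(target_size, target_size), 0, 0, InterpolationFlags.Linear);
return dst;
}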
/// <summary>
/// Run inference through the OpenCV DNN backend.
/// </summary>
/// <param name="img">Input image (BGR)</param>
/// <param name="input_names">Model input names (unused; the network's default input is bound)</param>
/// <param name="input_size">Model input shape in NCHW order</param>
/// <param name="output_names">Names of the output layers to fetch</param>
/// <param name="output_sizes">Model output shapes (unused)</param>
/// <returns>One flattened float array per requested output</returns>
public List<float[]> OpenCvInfer(Mat img, List<string> input_names, int[] input_size, List<string> output_names, List<int[]> output_sizes)
{
List<float[]> returns = new List<float[]>();
// Build an NCHW float blob, mirroring the OpenVINO preprocessing:
// scale to [0, 1], linear resize to the model size, swap BGR -> RGB
using (Mat blob = CvDnn.BlobFromImage(img, 1.0 / 255.0, new Size(input_size[3], input_size[2]), new Scalar(0, 0, 0), swapRB: true, crop: false))
{
opencv_infer.SetInput(blob);
foreach (var name in output_names)
{
using (Mat output = opencv_infer.Forward(name))
{
float[] output_data = new float[(int)output.Total()];
Marshal.Copy(output.Data, output_data, 0, output_data.Length);
returns.Add(output_data);
}
}
}
return returns;
}
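/// <summary>
/// Minimal usage sketch for the OpenCV backend. Illustrative only: the model
/// path, image path and output name "output0" are hypothetical placeholders.
/// </summary>
public static List<float[]> ExampleOpenCvUsage()
{
using (Predictor predictor = new Predictor("model.onnx", engine: "OpenCv"))
using (Mat frame = Cv2.ImRead("test.jpg"))
{
return predictor.OpenCvInfer(frame, null, new int[] { 1, 3, 640, 640 }, new List<string> { "output0" }, null);
}
}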
}
}