src/bindings/c/docs/api_overview.md
This API provides a simplified interface for OpenVINO functionality that allows you to:
Currently the OpenVINO C API is supported on Ubuntu* 18.04/20.04/22.04, Microsoft Windows* 10/11, CentOS* 7.3, and macOS* 10.15 and above OSes. Supported Python* versions:
To configure the environment for the OpenVINO C* API, run:
On Ubuntu 20.04/22.04: source <INSTALL_DIR>/setupvars.sh
On Windows 10/11:
. <path-to-setupvars-folder>/setupvars.ps1 in PowerShell, or <INSTALL_DIR>\setupvars.bat in Command Prompt. The script automatically detects the latest installed C* version and configures the required environment if the version is supported.
typedef struct ov_version {
const char* buildNumber;
const char* description;
} ov_version_t;
typedef struct {
const char* device_name;
ov_version_t version;
} ov_core_version_t;
typedef struct {
ov_core_version_t* versions;
size_t size;
} ov_core_version_list_t;
typedef struct {
char** devices;
size_t size;
} ov_available_devices_t;
typedef struct ov_dimension {
int64_t min;
int64_t max;
} ov_dimension_t;
typedef struct {
int64_t rank;
int64_t* dims;
} ov_shape_t;
typedef struct ov_partial_shape {
ov_rank_t rank;
ov_dimension_t* dims;
} ov_partial_shape_t;
typedef struct {
enum Status {
NOT_RUN,
OPTIMIZED_OUT,
EXECUTED
} status;
int64_t real_time;
int64_t cpu_time;
const char* node_name;
const char* exec_type;
const char* node_type;
} ov_profiling_info_t;
typedef struct {
ov_profiling_info_t* profiling_infos;
size_t size;
} ov_profiling_info_list_t;
typedef struct {
void(OPENVINO_C_API_CALLBACK* callback_func)(void* args);
void* args;
} ov_callback_t;
typedef enum {
DYNAMIC = 0U, //!< Dynamic element type
BOOLEAN, //!< boolean element type
BF16, //!< bf16 element type
F16, //!< f16 element type
F32, //!< f32 element type
F64, //!< f64 element type
I4, //!< i4 element type
I8, //!< i8 element type
I16, //!< i16 element type
I32, //!< i32 element type
I64, //!< i64 element type
U1, //!< binary element type
U2, //!< u2 element type
U3, //!< u3 element type
U4, //!< u4 element type
U6, //!< u6 element type
U8, //!< u8 element type
U16, //!< u16 element type
U32, //!< u32 element type
U64, //!< u64 element type
NF4, //!< nf4 element type
F8E4M3, //!< f8e4m3 element type
F8E5M2, //!< f8e5m2 element type
STRING, //!< string element type
F4E2M1, //!< f4e2m1 element type
F8E8M0, //!< f8e8m0 element type
} ov_element_type_e;
typedef enum {
OK = 0, //!< SUCCESS
GENERAL_ERROR = -1, //!< GENERAL_ERROR
NOT_IMPLEMENTED = -2, //!< NOT_IMPLEMENTED
NETWORK_NOT_LOADED = -3, //!< NETWORK_NOT_LOADED
PARAMETER_MISMATCH = -4, //!< PARAMETER_MISMATCH
NOT_FOUND = -5, //!< NOT_FOUND
OUT_OF_BOUNDS = -6, //!< OUT_OF_BOUNDS
UNEXPECTED = -7, //!< UNEXPECTED
REQUEST_BUSY = -8, //!< REQUEST_BUSY
RESULT_NOT_READY = -9, //!< RESULT_NOT_READY
NOT_ALLOCATED = -10, //!< NOT_ALLOCATED
INFER_NOT_STARTED = -11, //!< INFER_NOT_STARTED
NETWORK_NOT_READ = -12, //!< NETWORK_NOT_READ
INFER_CANCELLED = -13, //!< INFER_CANCELLED
INVALID_C_PARAM = -14, //!< INVALID_C_PARAM
UNKNOWN_C_ERROR = -15, //!< UNKNOWN_C_ERROR
NOT_IMPLEMENT_C_METHOD = -16, //!< NOT_IMPLEMENT_C_METHOD
UNKNOW_EXCEPTION = -17, //!< UNKNOW_EXCEPTION
} ov_status_e;
typedef enum {
UNDEFINE = 0U, //!< Undefine color format
NV12_SINGLE_PLANE, //!< Image in NV12 format as single tensor
NV12_TWO_PLANES, //!< Image in NV12 format represented as separate tensors for Y and UV planes.
I420_SINGLE_PLANE, //!< Image in I420 (YUV) format as single tensor
I420_THREE_PLANES, //!< Image in I420 format represented as separate tensors for Y, U and V planes.
RGB, //!< Image in RGB interleaved format (3 channels)
BGR, //!< Image in BGR interleaved format (3 channels)
GRAY, //!< Image in GRAY format (1 channel)
RGBX, //!< Image in RGBX interleaved format (4 channels)
BGRX //!< Image in BGRX interleaved format (4 channels)
} ov_color_format_e;
typedef enum {
RESIZE_LINEAR, //!< linear algorithm
RESIZE_CUBIC, //!< cubic algorithm
RESIZE_NEAREST //!< nearest algorithm
} ov_preprocess_resize_algorithm_e;
OPENVINO_C_VAR(const char*) ov_property_key_supported_properties;
OPENVINO_C_VAR(const char*) ov_property_key_available_devices;
OPENVINO_C_VAR(const char*) ov_property_key_optimal_number_of_infer_requests;
OPENVINO_C_VAR(const char*) ov_property_key_range_for_async_infer_requests;
OPENVINO_C_VAR(const char*) ov_property_key_range_for_streams;
OPENVINO_C_VAR(const char*) ov_property_key_device_full_name;
OPENVINO_C_VAR(const char*) ov_property_key_device_capabilities;
OPENVINO_C_VAR(const char*) ov_property_key_model_name;
OPENVINO_C_VAR(const char*) ov_property_key_optimal_batch_size;
OPENVINO_C_VAR(const char*) ov_property_key_max_batch_size;
OPENVINO_C_VAR(const char*) ov_property_key_cache_dir;
OPENVINO_C_VAR(const char*) ov_property_key_num_streams;
OPENVINO_C_VAR(const char*) ov_property_key_affinity;
OPENVINO_C_VAR(const char*) ov_property_key_inference_num_threads;
OPENVINO_C_VAR(const char*) ov_property_key_hint_enable_cpu_pinning;
OPENVINO_C_VAR(const char*) ov_property_key_hint_enable_hyper_threading;
OPENVINO_C_VAR(const char*) ov_property_key_hint_performance_mode;
OPENVINO_C_VAR(const char*) ov_property_key_hint_scheduling_core_type;
OPENVINO_C_VAR(const char*) ov_property_key_hint_inference_precision;
OPENVINO_C_VAR(const char*) ov_property_key_hint_num_requests;
OPENVINO_C_VAR(const char*) ov_property_key_log_level;
OPENVINO_C_VAR(const char*) ov_property_key_hint_model_priority;
OPENVINO_C_VAR(const char*) ov_property_key_enable_profiling;
OPENVINO_C_VAR(const char*) ov_property_key_device_priorities;
OPENVINO_C_VAR(const char*) ov_property_key_hint_execution_mode;
OPENVINO_C_VAR(const char*) ov_property_key_force_tbb_terminate;
OPENVINO_C_VAR(const char*) ov_property_key_enable_mmap;
OPENVINO_C_VAR(const char*) ov_property_key_auto_batch_timeout;
OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_device_bind_buffer;
OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_enable_startup_fallback;
OPENVINO_C_VAR(const char*) ov_property_key_intel_auto_enable_runtime_fallback;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_context_type;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_context;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_context_device_id;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_tile_id;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_ocl_queue;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_va_device;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_shared_mem_type;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_mem_handle;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_dev_object_handle;
OPENVINO_C_VAR(const char*) ov_property_key_intel_gpu_va_plane;
This struct represents an OpenVINO entity and allows you to manipulate plugins using unified interfaces.
ov_status_e ov_core_create(ov_core_t** core)
Note: Constructs OpenVINO Core instance by default.
Parameters:
core - A pointer to the newly created ov_core_t. Return value: Status code of the operation: OK(0) for success.
ov_status_e ov_core_create_with_config(const char* xml_config_file, ov_core_t** core)
Note: Constructs OpenVINO Core instance using XML configuration file with devices description.
Parameters:
xml_config_file - A full path to a .xml file containing plugins configuration. If the parameter is not specified, the default configuration is handled automatically. core - A pointer to the newly created ov_core_t. Return value: Status code of the operation: OK(0) for success.
Usage examples:
Create an ov_core_t instance with a custom configuration location specified:
char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml";
ov_core_t* core;
ov_status_e status = ov_core_create_with_config(xml_config_file, &core);
.xml file has the following structure:
<ie>
<plugins>
<plugin name="" location="" optional="yes/no">
<extensions>
<extension location=""/>
</extensions>
<properties>
<property key="" value=""/>
</properties>
</plugin>
</plugins>
</ie>
ov_status_e ov_get_openvino_version(ov_version_t* version)
Description: Get version of OpenVINO.
Parameters:
ov_version_t - a pointer to the version.Return value: Status of the operation: OK(0) for success.
Usage example:
ov_version_t version = {.description = NULL, .buildNumber = NULL};
ov_get_openvino_version(&version);
printf("description : %s \n", version.description);
printf("build number: %s \n", version.buildNumber);
ov_version_free(&version);
ov_status_e ov_core_read_model(const ov_core_t* core, const char* model_path, const char* bin_path, ov_model_t** model)
Description: Reads models from IR / ONNX / PDPD / TF / TFLite formats to create ov_model_t. You can create as many ov_model_t as you need and use them simultaneously (up to the limitation of the hardware resources).
Parameters:
core - A pointer to ov_core_t instance.model_path - Path to a model.bin_path - Path to a data file.model - A pointer to the newly created model.Return value: Status code of the operation: OK(0) for success.
Usage example:
ov_core_t* core = NULL;
ov_core_create(&core);
ov_model_t* model = NULL;
ov_core_read_model(core, "model.xml", "model.bin", &model);
ov_status_e ov_core_compile_model(const ov_core_t* core, const ov_model_t* model, const char* device_name, const size_t property_args_size, ov_compiled_model_t** compiled_model, ...);
Description: Creates a compiled model from a source model object.
Parameters:
core - A pointer to ov_core_t instance.model - Model object acquired from Core::read_model.device_name - Name of a device to load a model to.property_args_size - How many properties args will be passed, each property contains 2 args: key and value.compiled_model - A pointer to the newly created compiled_model.... - property parameter, optional pack of pairs: <char* property_key, char* property_value> relevant only for this load operation.Return value: Status code of the operation: OK(0) for success.
Usage example:
ov_core_t* core = nullptr;
ov_core_create(&core);
ov_model_t* model = nullptr;
ov_core_read_model(core, xml_file_name.c_str(), bin_file_name.c_str(), &model);
const char* key = ov_property_key_hint_performance_mode;
const char* num = "LATENCY";
ov_compiled_model_t* compiled_model = nullptr;
ov_core_compile_model(core, model, "CPU", 2, &compiled_model, key, num);
...
ov_compiled_model_free(compiled_model);
ov_model_free(model);
ov_core_free(core);
ov_status_e ov_core_set_property(const ov_core_t* core, const char* device_name, ...)
Description: Sets properties for a device, acceptable keys can be found in ov_property_key_xxx.
Parameters:
core - A pointer to ov_core_t instance.device_name - Name of a device.... - property parameters, optional pack of pairs: <char* property_key, char* property_value>.Return value: Status code of the operation: OK(0) for success.
Usage example:
ov_core_t* core = nullptr;
ov_core_create(&core);
const char* key_1 = ov_property_key_inference_num_threads;
const char* value_1 = "12";
const char* key_2 = ov_property_key_num_streams;
const char* value_2 = "7";
ov_core_set_property(core, "CPU", key_1, value_1, key_2, value_2);
...
ov_core_free(core);
ov_status_e ov_core_get_property(const ov_core_t* core, const char* device_name, const char* property_key, char** property_value)
Description: Gets properties related to device behaviour.
Parameters:
core - A pointer to ov_core_t instance.device_name - Name of a device.property_key - Property key.property_value - A pointer to property value with string format.Return value: Status code of the operation: 0 for success.
Usage example:
ov_core_t* core = nullptr;
ov_core_create(&core);
const char* key = ov_property_key_hint_performance_mode;
const char* mode = "LATENCY";
ov_core_set_property(core, "CPU", key, mode);
char* ret = nullptr;
ov_core_get_property(core, "CPU", key, &ret);
ov_free(ret);
...
ov_core_free(core);
ov_status_e ov_core_import_model(const ov_core_t* core, const char* content, const size_t content_size, const char* device_name, ov_compiled_model_t** compiled_model);
Description: Imports a compiled model from the previously exported one.
Parameters:
core - A pointer ov_core_t instance.content - A pointer to content of the exported model.content_size - Number of bytes in the exported network.device_name - Name of a device to import a compiled model for.compiled_model - A pointer to the newly created compiled_model.Return value: Status code of the operation: OK(0) for success.
ov_status_e ov_core_get_versions_by_device_name(const ov_core_t* core, const char* device_name, ov_core_version_list_t* versions)
core - A pointer ov_core_t instance.device_name - A device name to identify a plugin.versions - A pointer to versions corresponding to device_name.ov_status_e ov_core_create_context(const ov_core_t* core, const char* device_name, const size_t context_args_size, ov_remote_context_t** context, ...);
core - A pointer ov_core_t instance.device_name - Device name to identify a plugin.context_args_size - How many property args will be for this remote context creation.context - A pointer to the newly created remote context.... - variadic parameters, actual property parameter for remote context.ov_status_e ov_core_compile_model_with_context(const ov_core_t* core, const ov_model_t* model, const ov_remote_context_t* context, const size_t property_args_size, ov_compiled_model_t** compiled_model, ...);
Description: Creates a compiled model from a source model within a specified remote context.
Parameters:
core - A pointer ov_core_t instance.model - Model object acquired from ov_core_read_model.context - A pointer to the newly created remote context.property_args_size - How many args will be for this compiled model.compiled_model - A pointer to the newly created compiled_model.... - variadic parameters, actual property parameter for remote context.Return value: Status code of the operation: OK(0) for success.
ov_status_e ov_core_add_extension(const ov_core_t* core, const char* library_path)
core - A pointer ov_core_t instance.library_path - Path to an extension.This struct contains the information about the model read from IR and allows you to manipulate some model parameters such as layers affinity and output layers.
ov_status_e ov_model_free(ov_model_t* model)
model - A pointer to the ov_model_t to free memory.ov_status_e ov_model_const_input(const ov_model_t* model, ov_output_const_port_t** input_port);
model - A pointer to the ov_model_t.input_port - A pointer to the ov_output_const_port_t.ov_status_e ov_model_input(const ov_model_t* model, ov_output_port_t** input_port);
Description: Get single input port of ov_model_t, which only support single input model.
Parameters:
model - A pointer to the ov_model_t.input_port - A pointer to the ov_output_port_t.Return value: Status code of the operation: OK(0) for success.
ov_status_e ov_model_input_by_name(const ov_model_t* model, const char* tensor_name, ov_output_port_t** input_port)
model - A pointer to the ov_model_t.tensor_name - Input tensor name (char *).input_port - A pointer to the ov_output_port_t.ov_status_e ov_model_input_by_index(const ov_model_t* model, const size_t index, ov_output_port_t** input_port)
model - A pointer to the ov_model_t.index - Input tensor index.input_port - A pointer to the ov_output_port_t.ov_status_e ov_model_const_output(const ov_model_t* model, ov_output_const_port_t** output_port);
model - A pointer to the ov_model_t.output_port - A pointer to the ov_output_const_port_t.ov_status_e ov_model_output(const ov_model_t* model, ov_output_port_t** output_port);
model - A pointer to the ov_model_t.output_port - A pointer to the ov_output_port_t.ov_status_e ov_model_inputs_size(const ov_model_t* model, size_t* input_size);
model - A pointer to the ov_model_t.input_size - The model's input size.ov_status_e ov_model_outputs_size(const ov_model_t* model, size_t* output_size);
model - A pointer to the ov_model_t.output_size - The model's output size.bool ov_model_is_dynamic(const ov_model_t* model)
model - A pointer to the ov_model_t.ov_status_e ov_model_reshape(const ov_model_t* model, const char** tensor_names, const ov_partial_shape_t* partial_shapes, size_t size)
model - A pointer to the ov_model_t.tensor_names - The list of input tensor names.partialShape - A PartialShape list.size - The item count in the list.ov_status_e ov_model_get_friendly_name(const ov_model_t* model, char** friendly_name)
model - A pointer to the ov_model_t.friendly_name - The model's friendly name.This struct contains the information about the model's port.
ov_status_e ov_const_port_get_shape(const ov_output_const_port_t* port, ov_shape_t* tensor_shape)
port - A pointer to ov_output_const_port_t.tensor_shape - Returned tensor shape.ov_status_e ov_port_get_shape(const ov_output_port_t* port, ov_shape_t* tensor_shape)
port - A pointer to ov_output_port_t.tensor_shape - Returned tensor shape.ov_status_e ov_port_get_any_name(const ov_output_const_port_t* port, char** tensor_name)
port - A pointer to ov_output_port_t.tensor_name - Returned tensor name.ov_status_e ov_port_get_partial_shape(const ov_output_const_port_t* port, ov_partial_shape_t* partial_shape)
port - A pointer to ov_output_const_port_t.partial_shape - Partial shape.ov_status_e ov_port_get_element_type(const ov_output_const_port_t* port, ov_element_type_e* tensor_type)
port - A pointer to ov_output_const_port_t.tensor_type - Returned tensor type.void ov_output_port_free(ov_output_port_t* port)
port - A pointer to ov_output_port_t.void ov_output_const_port_free(ov_output_const_port_t* port)
port - A pointer to ov_output_const_port_t.This struct represents a compiled model instance loaded to plugin and ready for inference.
ov_status_e ov_compiled_model_inputs_size(const ov_compiled_model_t* compiled_model, size_t* size)
compiled_model - A pointer to the ov_compiled_model_t instance.input_size - The compiled_model's input size.ov_status_e ov_compiled_model_input(const ov_compiled_model_t* compiled_model, ov_output_const_port_t** input_port)
compiled_model - A pointer to the ov_compiled_model_t instance.input_port - A pointer to the ov_output_const_port_t instance.ov_status_e ov_compiled_model_input_by_index(const ov_compiled_model_t* compiled_model, const size_t index, ov_output_const_port_t** input_port)
compiled_model - A pointer to the ov_compiled_model_t instance.index: Input index.input_port - A pointer to the ov_output_const_port_t instance.ov_status_e ov_compiled_model_input_by_name(const ov_compiled_model_t* compiled_model, const char* name, ov_output_const_port_t** input_port)
compiled_model - A pointer to the ov_compiled_model_t instance.name - input tensor name.input_port - A pointer to the ov_output_const_port_t instance.ov_compiled_model_outputs_size(const ov_compiled_model_t* compiled_model, size_t* size)
compiled_model - A pointer to the ov_compiled_model_t instance.size - The compiled_model's output size.ov_status_e ov_compiled_model_output(const ov_compiled_model_t* compiled_model, ov_output_const_port_t** output_port)
compiled_model - A pointer to the ov_compiled_model_t instance.output_port - A pointer to the ov_output_const_port_t instance.ov_status_e ov_compiled_model_output_by_index(const ov_compiled_model_t* compiled_model, const size_t index, ov_output_const_port_t** output_port)
compiled_model - A pointer to the ov_compiled_model_t instance.index: Output index.output_port - A pointer to the ov_output_const_port_t instance.ov_status_e ov_compiled_model_output_by_name(const ov_compiled_model_t* compiled_model, const char* name, ov_output_const_port_t** output_port)
compiled_model - A pointer to the ov_compiled_model_t instance.name - input tensor name.output_port - A pointer to the ov_output_const_port_t instance.ov_status_e ov_compiled_model_get_runtime_model(const ov_compiled_model_t* compiled_model, ov_model_t** model)
compiled_model - A pointer to the ov_compiled_model_t instance.model - A pointer to the ov_model_t instance.ov_status_e ov_compiled_model_create_infer_request(const ov_compiled_model_t* compiled_model, ov_infer_request_t** infer_request)
compiled_model - A pointer to ov_compiled_model_t instance.infer_request - A pointer to ov_infer_request_t instance.ov_status_e ov_compiled_model_set_property(const ov_compiled_model_t* compiled_model, ...)
compiled_model - A pointer to ov_compiled_model_t instance.... variadic parameters, the format is <char* property_key, char* property_value>.ov_status_e ov_compiled_model_get_property(const ov_compiled_model_t* compiled_model, const char* property_key, char** property_value)
compiled_model - A pointer to ov_compiled_model_t instance.property_key - Property key.property_value - A pointer to property value.ov_status_e ov_compiled_model_export_model(const ov_compiled_model_t* compiled_model, const char* export_model_path)
std::ostream.compiled_model - A pointer to ov_compiled_model_t instance.export_model_path - Path to the file.void ov_compiled_model_free(ov_compiled_model_t* compiled_model)
compiled_model - A pointer to ov_compiled_model_t instance.This struct provides an interface to infer requests of ov_compiled_model_t and serves to handle infer requests execution and to set and get output data.
ov_status_e ov_infer_request_set_tensor(ov_infer_request_t* infer_request, const char* tensor_name, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.tensor_name - Name of the input or output tensor.tensor - Reference to the tensor.ov_status_e ov_infer_request_set_tensor_by_port(ov_infer_request_t* infer_request, const ov_output_port_t* port, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.port - Port of the input or output tensor, which can be got by calling ov_model_t/ov_compiled_model_t interface.tensor - Reference to the tensor.ov_status_e ov_infer_request_set_input_tensor_by_index(ov_infer_request_t* infer_request, const size_t idx, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.idx - Index of the input port.tensor - Reference to the tensor.ov_status_e ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.tensor - Reference to the tensor.ov_status_e ov_infer_request_set_output_tensor_by_index(ov_infer_request_t* infer_request, const size_t idx, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.idx - Index of the input port.tensor - Reference to the tensor.ov_status_e ov_infer_request_set_output_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor)
infer_request - A pointer to ov_infer_request_t instance.tensor - Reference to the tensor.ov_status_e ov_infer_request_get_tensor(const ov_infer_request_t* infer_request, const char* tensor_name, ov_tensor_t** tensor)
infer_request - A pointer to ov_infer_request_t instance.tensor_name - Name of the input or output tensor.tensor - Reference to the tensor.ov_status_e ov_infer_request_get_tensor_by_port(const ov_infer_request_t* infer_request, const ov_output_port_t* port, ov_tensor_t** tensor)
infer_request - A pointer to ov_infer_request_t instance.port - Port of the tensor to get.tensor - Reference to the tensor.ov_status_e ov_infer_request_get_input_tensor_by_index(const ov_infer_request_t* infer_request, const size_t idx, ov_tensor_t** tensor)
infer_request - A pointer to ov_infer_request_t instance.idx - Index of the tensor to get.tensor - Reference to the tensor.ov_status_e ov_infer_request_get_output_tensor_by_index(const ov_infer_request_t* infer_request, const size_t idx, ov_tensor_t** tensor)
infer_request - A pointer to ov_infer_request_t instance.idx - Index of the tensor to get.tensor - Reference to the tensor.ov_status_e ov_infer_request_infer(ov_infer_request_t* infer_request)
infer_request - A pointer to ov_infer_request_t instance.ov_status_e ov_infer_request_start_async(ov_infer_request_t* infer_request)
infer_request - A pointer to ov_infer_request_t instance.ov_status_e ov_infer_request_cancel(ov_infer_request_t* infer_request)
infer_request - A pointer to ov_infer_request_t instance.ov_status_e ov_infer_request_wait_for(ov_infer_request_t* infer_request, const int64_t timeout);
infer_request - A pointer to ov_infer_request_t instance.timeout - Maximum duration, in milliseconds, to block for.ov_status_e ov_infer_request_set_callback(ov_infer_request_t* infer_request, const ov_callback_t* callback)
infer_request - A pointer to ov_infer_request_t instance.callback A function to be called.void ov_infer_request_free(ov_infer_request_t* infer_request)
infer_request - A pointer to ov_infer_request_t instance.void ov_infer_request_get_profiling_info(const ov_infer_request_t* infer_request, ov_profiling_info_list_t* profiling_infos)
infer_request - A pointer to ov_infer_request_t instance.profiling_infos - Vector of profiling information for operations in a model.ov_status_e ov_tensor_create_from_host_ptr(const ov_element_type_e type, const ov_shape_t shape, void* host_ptr, ov_tensor_t** tensor)
type - Tensor element typeshape - Tensor shapehost_ptr - Pointer to pre-allocated host memorytensor - A point to ov_tensor_tov_status_e ov_tensor_create_from_string_array(const char** string_array, const size_t array_size, const ov_shape_t shape, ov_tensor_t** tensor)
string_array - String arrayarray_size - Number of elements in string arrayshape - Tensor shapetensor - A point to ov_tensor_tov_status_e ov_tensor_create(const ov_element_type_e type, const ov_shape_t shape, ov_tensor_t** tensor)
type - Tensor element typeshape - Tensor shapetensor - A point to ov_tensor_tov_status_e ov_tensor_get_shape(const ov_tensor_t* tensor, ov_shape_t* shape)
tensor - A point to ov_tensor_tshape - Tensor shapeov_status_e ov_tensor_get_element_type(const ov_tensor_t* tensor, ov_element_type_e* type)
tensor - A point to ov_tensor_ttype - Tensor element type.ov_status_e ov_tensor_get_byte_size(const ov_tensor_t* tensor, size_t* byte_size)
tensor - A point to ov_tensor_tbyte_size - The size of the current Tensor in bytes.ov_status_e ov_tensor_data(const ov_tensor_t* tensor, void** data)
tensor - A point to ov_tensor_tdata - A point to host memory.ov_status_e ov_tensor_set_string_data(ov_tensor_t* tensor, const char** string_array, size_t array_size)
tensor - A point to ov_tensor_tstring_array - String arrayarray_size - Number of elements in string arrayvoid ov_tensor_free(ov_tensor_t* tensor)
tensor - A point to ov_tensor_t