NOTE(review): this patch was reconstructed from a whitespace-mangled paste in which
every angle-bracket token (template arguments, #include targets) had been stripped
and all newlines collapsed. The stripped tokens below were restored from context
(ONNX Runtime OpenVINO EP sources) — re-verify each restored `<...>` and the hunk
context lines against the real trees before applying.
NOTE(review): the change from `const char log_tag[]` to `const std::string log_tag`
in backend_utils.h gives every including TU its own dynamically-initialized copy;
`inline constexpr char log_tag[] = "[OpenVINO-EP] ";` would be the better fix —
confirm intent with the patch author.

diff --git a/onnxruntime/core/providers/openvino/backend_manager.cc b/onnxruntime/core/providers/openvino/backend_manager.cc
index b9d79bfe4545..7e4c0dc8d726 100644
--- a/onnxruntime/core/providers/openvino/backend_manager.cc
+++ b/onnxruntime/core/providers/openvino/backend_manager.cc
@@ -105,7 +105,7 @@ BackendManager::BackendManager(const onnxruntime::Node& fused_node,
 bool BackendManager::ModelHasBatchedInputs(const ONNX_NAMESPACE::ModelProto& model_proto) const {
   bool has_batched_inputs = true;
 
-  for (int i = 0; i < static_cast<int>(subgraph_context_.input_indexes).size(); i++) {
+  for (int i = 0; i < static_cast<int>(subgraph_context_.input_indexes.size()); i++) {
     auto& input = model_proto.graph().input(subgraph_context_.input_indexes[i]);
 
     // Batch-process only raw image inputs (NCHW or NHWC layouts)
diff --git a/onnxruntime/core/providers/openvino/backend_utils.h b/onnxruntime/core/providers/openvino/backend_utils.h
index 5d57204281ed..82b0351e87da 100644
--- a/onnxruntime/core/providers/openvino/backend_utils.h
+++ b/onnxruntime/core/providers/openvino/backend_utils.h
@@ -27,7 +27,7 @@ namespace onnxruntime {
 namespace openvino_ep {
 namespace backend_utils {
 
-const char log_tag[] = "[OpenVINO-EP] ";
+const std::string log_tag = "[OpenVINO-EP] ";
 
 #ifndef NDEBUG
 bool IsDebugEnabled();
diff --git a/onnxruntime/core/providers/openvino/backends/basic_backend.cc b/onnxruntime/core/providers/openvino/backends/basic_backend.cc
index 1a1474dbb331..09e1322ff59f 100644
--- a/onnxruntime/core/providers/openvino/backends/basic_backend.cc
+++ b/onnxruntime/core/providers/openvino/backends/basic_backend.cc
@@ -217,7 +217,7 @@ void BasicBackend::StartAsyncInference(Ort::KernelContext& context, OVInferReque
       // avoid input copies on the CPU device
       if (global_context_.device_type.find("CPU") != std::string::npos) {
         tensor_ptr = std::make_shared<ov::Tensor>(input->get_element_type(), input_tensor_shape,
-                                                  reinterpret_cast<void*>(tensor_data));
+                                                  (void*)tensor_data);
       } else {
         tensor_ptr = std::make_shared<ov::Tensor>(input->get_element_type(), input_tensor_shape);
         FillInputBlob(tensor_ptr, batch_slice_idx, input_name, context, subgraph_context_);
diff --git a/onnxruntime/core/providers/openvino/contexts.h b/onnxruntime/core/providers/openvino/contexts.h
index 34657c46abe5..29233e72c33b 100644
--- a/onnxruntime/core/providers/openvino/contexts.h
+++ b/onnxruntime/core/providers/openvino/contexts.h
@@ -6,47 +6,46 @@
 #include <string>
 #include <vector>
 #include <unordered_map>
-s
 #include "ov_interface.h"
 
- namespace onnxruntime {
- namespace openvino_ep {
+namespace onnxruntime {
+namespace openvino_ep {
 
- // Holds context applicable to the entire EP instance.
- struct GlobalContext {
-   OVCore ie_core;
-   bool is_wholly_supported_graph = false;
-   bool enable_npu_fast_compile = false;
-   bool enable_opencl_throttling = false;
-   bool enable_dynamic_shapes = false;
-   size_t num_of_threads;
-   std::string device_type;
-   std::string precision_str;
-   std::string device_id;
-   std::string cache_dir;
-   int num_streams;
-   std::vector<bool> deviceAvailableList = {true, true, true, true, true, true, true, true};
-   std::vector<std::string> deviceTags = {"0", "1", "2", "3", "4", "5", "6", "7"};
-   std::string onnx_model_name;
-   std::string onnx_model_path_name;
-   int onnx_opset_version;
-   void* context = 0;
-   bool use_api_2;
- };
+// Holds context applicable to the entire EP instance.
+struct GlobalContext {
+  OVCore ie_core;
+  bool is_wholly_supported_graph = false;
+  bool enable_npu_fast_compile = false;
+  bool enable_opencl_throttling = false;
+  bool enable_dynamic_shapes = false;
+  size_t num_of_threads;
+  std::string device_type;
+  std::string precision_str;
+  std::string device_id;
+  std::string cache_dir;
+  int num_streams;
+  std::vector<bool> deviceAvailableList = {true, true, true, true, true, true, true, true};
+  std::vector<std::string> deviceTags = {"0", "1", "2", "3", "4", "5", "6", "7"};
+  std::string onnx_model_name;
+  std::string onnx_model_path_name;
+  int onnx_opset_version;
+  void* context = 0;
+  bool use_api_2;
+};
 
- // Holds context specific to subgraph.
- struct SubGraphContext {
-   bool has_dynamic_input_shape = false;
-   bool enable_batching = false;
-   bool set_npu_config = false;
-   bool is_constant = false;
-   void* context = 0;
-   std::string subgraph_name;
-   std::vector<int> input_indexes;
-   std::unordered_map<std::string, int> input_names;
-   std::unordered_map<std::string, int> output_names;
-   std::string precision;
- };
+// Holds context specific to subgraph.
+struct SubGraphContext {
+  bool has_dynamic_input_shape = false;
+  bool enable_batching = false;
+  bool set_npu_config = false;
+  bool is_constant = false;
+  void* context = 0;
+  std::string subgraph_name;
+  std::vector<int> input_indexes;
+  std::unordered_map<std::string, int> input_names;
+  std::unordered_map<std::string, int> output_names;
+  std::string precision;
+};
 
- } // namespace openvino_ep
+} // namespace openvino_ep
 } // namespace onnxruntime
diff --git a/onnxruntime/core/providers/openvino/openvino_provider_factory.cc b/onnxruntime/core/providers/openvino/openvino_provider_factory.cc
index 6e652380cb08..fbb89710c800 100644
--- a/onnxruntime/core/providers/openvino/openvino_provider_factory.cc
+++ b/onnxruntime/core/providers/openvino/openvino_provider_factory.cc
@@ -107,7 +107,7 @@ struct OpenVINO_Provider : Provider {
 
     if (provider_options_map.find("context") != provider_options_map.end()) {
       std::string str = provider_options_map.at("context");
-      unsigned int64_t number = std::strtoull(str.c_str(), nullptr, 16);
+      uint64_t number = std::strtoull(str.c_str(), nullptr, 16);
       context = reinterpret_cast<void*>(number);
     }
 
diff --git a/onnxruntime/core/providers/openvino/ov_interface.cc b/onnxruntime/core/providers/openvino/ov_interface.cc
index 920f0d7968e7..d2ce378c97e0 100644
--- a/onnxruntime/core/providers/openvino/ov_interface.cc
+++ b/onnxruntime/core/providers/openvino/ov_interface.cc
@@ -17,7 +17,7 @@ using WaitMode = InferenceEngine::IInferRequest::WaitMode;
 namespace onnxruntime {
 namespace openvino_ep {
 
-const char log_tag[] = "[OpenVINO-EP] ";
+const std::string log_tag = "[OpenVINO-EP] ";
 std::shared_ptr<OVNetwork> OVCore::ReadModel(const std::string& model) const {
   try {
     OVTensor weights;
diff --git a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
index 4bcb4d372857..a5a0faa3a8f2 100644
--- a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
+++ b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
@@ -1190,7 +1190,7 @@ bool DataOps::node_is_supported(const std::map<std::string, std::set<std::string
   if (opset->second.find(optype) == opset->second.end() && op_fun == ops_supported_as_function.end()) {
 #ifndef NDEBUG
     if (openvino_ep::backend_utils::IsDebugEnabled()) {
-      std::cout << "The operator is not available in OpenVINO ngraph operators list
+      std::cout << "The operator is not available in OpenVINO ngraph operators list"
                 << "nor the operator is a special ONNX function" << std::endl;
     }
 #endif