diff --git a/src/bindings/python/src/pyopenvino/core/infer_request.cpp b/src/bindings/python/src/pyopenvino/core/infer_request.cpp
index 9f572d273dc5f3..3e583b8ee53a89 100644
--- a/src/bindings/python/src/pyopenvino/core/infer_request.cpp
+++ b/src/bindings/python/src/pyopenvino/core/infer_request.cpp
@@ -10,6 +10,7 @@
 #include <string>
 
 #include "pyopenvino/core/common.hpp"
+#include "pyopenvino/core/remote_tensor.hpp"
 #include "pyopenvino/utils/utils.hpp"
 
 namespace py = pybind11;
@@ -464,12 +465,29 @@ void regclass_InferRequest(py::module m) {
         },
         R"(
             Gets output tensor of InferRequest.
-
+
             :return: An output Tensor for the model.
                      If model has several outputs, an exception is thrown.
             :rtype: openvino.runtime.Tensor
         )");
 
+    cls.def(
+        "set_tensor",
+        [](InferRequestWrapper& self, const std::string& name, const RemoteTensorWrapper& tensor) {
+            self.m_request->set_tensor(name, tensor.tensor);
+        },
+        py::arg("name"),
+        py::arg("tensor"),
+        R"(
+            Sets input/output tensor of InferRequest.
+
+            :param name: Name of input/output tensor.
+            :type name: str
+            :param tensor: Tensor object. The element_type and shape of a tensor
+                           must match the model's input/output element_type and shape.
+            :type tensor: openvino.runtime.Tensor
+        )");
+
     cls.def(
         "set_tensor",
         [](InferRequestWrapper& self, const std::string& name, const ov::Tensor& tensor) {
diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp
index 831e4c28021e38..3c9ad0f7317a27 100644
--- a/src/plugins/intel_gpu/src/graph/program_node.cpp
+++ b/src/plugins/intel_gpu/src/graph/program_node.cpp
@@ -528,7 +528,7 @@ bool program_node::is_fused_dep(size_t dep_idx) const {
 std::set<size_t> program_node::get_lockable_input_ids() const {
     const auto impl = get_selected_impl();
     const bool has_cpu_impl = get_preferred_impl_type() == impl_types::cpu || (impl && impl->is_cpu());
-    if (has_cpu_impl) {
+    if (has_cpu_impl && !is_type<shape_of>()) {
         std::set<size_t> dependencies_indexes;
         for (size_t i = 0; i < get_dependencies().size(); i++)
             dependencies_indexes.insert(i);
diff --git a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
index ee6486f432a380..baad7361425cca 100644
--- a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
@@ -3,6 +3,10 @@
 //
 
 #include "openvino/core/preprocess/pre_post_process.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
 #include "openvino/runtime/intel_gpu/properties.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
@@ -2616,6 +2620,45 @@ ov::RemoteTensor create_tensor(ov::intel_gpu::ocl::ClContext context,
 }
 } // namespace
 
+TEST(RemoteTensor, smoke_LockableHandling) {
+#if defined(ANDROID)
+    GTEST_SKIP();
+#endif
+
+    auto core = ov::Core();
+    auto remote_context = core.get_default_context(ov::test::utils::DEVICE_GPU);
+    auto gpu_context = remote_context.as<ov::intel_gpu::ocl::ClContext>();
+    auto type = ov::element::f32;
+    ov::Shape shape = {4};
+
+    auto remote_tensor = gpu_context.create_tensor(type, shape);
+
+    auto host_tensor_in = ov::Tensor(type, shape);
+    init_tensor(host_tensor_in);
+    remote_tensor.copy_from(host_tensor_in);
+
+    auto param_node = std::make_shared<ov::op::v0::Parameter>(type, ov::PartialShape{-1});
+    auto const_node = std::make_shared<ov::op::v0::Constant>(host_tensor_in);
+    auto add_node = std::make_shared<ov::op::v1::Add>(param_node, const_node);
+    auto shape_of_node = std::make_shared<ov::op::v3::ShapeOf>(param_node);
+    auto res1 = std::make_shared<ov::op::v0::Result>(add_node);
+    auto res2 = std::make_shared<ov::op::v0::Result>(shape_of_node);
+    auto model = std::make_shared<ov::Model>(ov::ResultVector{res1, res2}, ov::ParameterVector{param_node});
+
+    auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
+    auto request = compiled_model.create_infer_request();
+    request.set_input_tensor(remote_tensor);
+
+    request.infer();
+    auto res = request.get_output_tensor(0);
+    auto host_res = ov::Tensor(type, shape);
+    res.copy_to(host_res);
+
+    for (size_t i = 0; i < ov::shape_size(host_tensor_in.get_shape()); i++) {
+        ASSERT_EQ(host_res.data<float>()[i], host_tensor_in.data<float>()[i] * 2);
+    }
+}
+
 TEST_P(RemoteTensor, smoke_CopyFrom) {
 #if defined(ANDROID)
     GTEST_SKIP();
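
For reference, the `set_tensor` overload bound above is what lets a RemoteTensor be passed by name from Python. A minimal usage sketch, assuming a GPU device is present and an IR model with a single static input; the model path, variable names, and the empty property map passed to create_tensor are illustrative assumptions, not part of the patch:

    import openvino.runtime as ov

    core = ov.Core()
    model = core.read_model("model.xml")  # hypothetical model path
    compiled = core.compile_model(model, "GPU")

    # Allocate a device-side tensor from the GPU plugin's default remote context.
    context = core.get_default_context("GPU")
    port = model.input(0)
    remote = context.create_tensor(port.get_element_type(), port.get_shape(), {})

    # The new overload dispatches on the RemoteTensor wrapper, so a tensor
    # created from a remote context can be bound to the request by name.
    request = compiled.create_infer_request()
    request.set_tensor(port.get_any_name(), remote)
    request.infer()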