diff --git a/src/bindings/python/src/openvino/runtime/__init__.py b/src/bindings/python/src/openvino/runtime/__init__.py
index 94baba5e0c3ab1..df2cab57bd0aba 100644
--- a/src/bindings/python/src/openvino/runtime/__init__.py
+++ b/src/bindings/python/src/openvino/runtime/__init__.py
@@ -66,6 +66,8 @@ from openvino.runtime.ie_api import tensor_from_file
 from openvino.runtime.ie_api import compile_model
 
+from openvino.utils import deprecated
+
 # Extend Node class to support binary operators
 Node.__add__ = opset13.add
 Node.__sub__ = opset13.subtract
@@ -77,9 +79,9 @@ Node.__rmul__ = lambda left, right: opset13.multiply(right, left)
 Node.__rdiv__ = lambda left, right: opset13.divide(right, left)
 Node.__rtruediv__ = lambda left, right: opset13.divide(right, left)
-Node.__eq__ = opset13.equal
-Node.__ne__ = opset13.not_equal
-Node.__lt__ = opset13.less
-Node.__le__ = opset13.less_equal
-Node.__gt__ = opset13.greater
-Node.__ge__ = opset13.greater_equal
+Node.__eq__ = deprecated(version="2025.3", message="Use ops.equal instead")(opset13.equal)
+Node.__ne__ = deprecated(version="2025.3", message="Use ops.not_equal instead")(opset13.not_equal)
+Node.__lt__ = deprecated(version="2025.3", message="Use ops.less instead")(opset13.less)
+Node.__le__ = deprecated(version="2025.3", message="Use ops.less_equal instead")(opset13.less_equal)
+Node.__gt__ = deprecated(version="2025.3", message="Use ops.greater instead")(opset13.greater)
+Node.__ge__ = deprecated(version="2025.3", message="Use ops.greater_equal instead")(opset13.greater_equal)
diff --git a/src/bindings/python/src/pyopenvino/graph/node.cpp b/src/bindings/python/src/pyopenvino/graph/node.cpp
index 46cc6bd5e802f9..f4234a9dde2294 100644
--- a/src/bindings/python/src/pyopenvino/graph/node.cpp
+++ b/src/bindings/python/src/pyopenvino/graph/node.cpp
@@ -170,6 +170,15 @@ void regclass_graph_Node(py::module m) {
                 :type input_tensors: List[openvino.runtime.Tensor]
                 :rtype: bool
              )");
+    node.def("get_instance_id",
+             &ov::Node::get_instance_id,
+             R"(
+                Returns id of the node.
+
+                :return: id of the node.
+                :rtype: int
+              )");
+
     node.def("get_input_tensor",
              &ov::Node::get_input_tensor,
              py::arg("index"),
diff --git a/src/bindings/python/tests/test_graph/test_create_op.py b/src/bindings/python/tests/test_graph/test_create_op.py
index 6a6d3cea2ba233..dcdb8592390ad4 100644
--- a/src/bindings/python/tests/test_graph/test_create_op.py
+++ b/src/bindings/python/tests/test_graph/test_create_op.py
@@ -397,18 +397,19 @@ def test_lstm_sequence_operator_bidirectional_opset1(dtype, op_name):
     parameter_b = ov.parameter(b_shape, name="B", dtype=dtype)
 
     direction = "BIDIRECTIONAL"
-    node = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-        name=op_name,
-    )
+    with pytest.warns(DeprecationWarning):
+        node = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+            name=op_name,
+        )
 
     assert node.get_type_name() == "LSTMSequence"
     assert node.get_friendly_name() == op_name
@@ -419,21 +420,22 @@ def test_lstm_sequence_operator_bidirectional_opset1(dtype, op_name):
     activation_alpha = [1.0, 2.0, 3.0]
     activation_beta = [3.0, 2.0, 1.0]
     clip = 1.22
-    node_param = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-        activations,
-        activation_alpha,
-        activation_beta,
-        clip,
-    )
+    with pytest.warns(DeprecationWarning):
+        node_param = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+            activations,
+            activation_alpha,
+            activation_beta,
+            clip,
+        )
 
     assert node_param.get_type_name() == "LSTMSequence"
     assert node_param.get_output_size() == 3
@@ -464,18 +466,18 @@ def test_lstm_sequence_operator_reverse_opset1(dtype):
     parameter_b = ov.parameter(b_shape, name="B", dtype=dtype)
 
     direction = "REVERSE"
-
-    node_default = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-    )
+    with pytest.warns(DeprecationWarning):
+        node_default = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+        )
 
     assert node_default.get_type_name() == "LSTMSequence"
     assert node_default.get_output_size() == 3
@@ -484,22 +486,22 @@ def test_lstm_sequence_operator_reverse_opset1(dtype):
     activation_alpha = [1.0, 2.0, 3.0]
     activation_beta = [3.0, 2.0, 1.0]
     clip = 1.22
-
-    node_param = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-        activations,
-        activation_alpha,
-        activation_beta,
-        clip,
-    )
+    with pytest.warns(DeprecationWarning):
+        node_param = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+            activations,
+            activation_alpha,
+            activation_beta,
+            clip,
+        )
 
     assert node_param.get_type_name() == "LSTMSequence"
     assert node_param.get_output_size() == 3
@@ -530,18 +532,18 @@ def test_lstm_sequence_operator_forward_opset1(dtype):
     parameter_b = ov.parameter(b_shape, name="B", dtype=dtype)
 
     direction = "forward"
-
-    node_default = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-    )
+    with pytest.warns(DeprecationWarning):
+        node_default = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+        )
 
     assert node_default.get_type_name() == "LSTMSequence"
     assert node_default.get_output_size() == 3
@@ -550,22 +552,22 @@ def test_lstm_sequence_operator_forward_opset1(dtype):
     activation_alpha = [2.0]
     activation_beta = [1.0]
    clip = 0.5
-
-    node = ov_opset1.lstm_sequence(
-        parameter_x,
-        parameter_h_t,
-        parameter_c_t,
-        parameter_seq_len,
-        parameter_w,
-        parameter_r,
-        parameter_b,
-        hidden_size,
-        direction,
-        activations,
-        activation_alpha,
-        activation_beta,
-        clip,
-    )
+    with pytest.warns(DeprecationWarning):
+        node = ov_opset1.lstm_sequence(
+            parameter_x,
+            parameter_h_t,
+            parameter_c_t,
+            parameter_seq_len,
+            parameter_w,
+            parameter_r,
+            parameter_b,
+            hidden_size,
+            direction,
+            activations,
+            activation_alpha,
+            activation_beta,
+            clip,
+        )
 
     assert node.get_type_name() == "LSTMSequence"
     assert node.get_output_size() == 3
diff --git a/src/bindings/python/tests/test_graph/test_ops_binary.py b/src/bindings/python/tests/test_graph/test_ops_binary.py
index 75c8f8daf2f640..a711f4c458a567 100644
--- a/src/bindings/python/tests/test_graph/test_ops_binary.py
+++ b/src/bindings/python/tests/test_graph/test_ops_binary.py
@@ -6,6 +6,7 @@
 
 import numpy as np
 import pytest
+import warnings
 
 from openvino import Type
 import openvino.runtime.opset13 as ov
@@ -107,27 +108,28 @@ def test_binary_logical_op_with_scalar(graph_api_helper):
 
 
 @pytest.mark.parametrize(
-    ("operator", "expected_type"),
+    ("operator", "expected_type", "warning_type"),
     [
-        (operator.add, Type.f32),
-        (operator.sub, Type.f32),
-        (operator.mul, Type.f32),
-        (operator.truediv, Type.f32),
-        (operator.eq, Type.boolean),
-        (operator.ne, Type.boolean),
-        (operator.gt, Type.boolean),
-        (operator.ge, Type.boolean),
-        (operator.lt, Type.boolean),
-        (operator.le, Type.boolean),
+        (operator.add, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.sub, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.mul, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.truediv, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.eq, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.ne, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.gt, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.ge, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.lt, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.le, Type.boolean, pytest.warns(DeprecationWarning)),
     ],
 )
-def test_binary_operators(operator, expected_type):
+def test_binary_operators(operator, expected_type, warning_type):
     value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
 
     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
 
-    model = operator(parameter_a, value_b)
+    with warning_type:
+        model = operator(parameter_a, value_b)
 
     assert model.get_output_size() == 1
     assert list(model.get_output_shape(0)) == shape
@@ -135,27 +137,28 @@ def test_binary_operators(operator, expected_type):
 
 
 @pytest.mark.parametrize(
-    ("operator", "expected_type"),
+    ("operator", "expected_type", "warning_type"),
     [
-        (operator.add, Type.f32),
-        (operator.sub, Type.f32),
-        (operator.mul, Type.f32),
-        (operator.truediv, Type.f32),
-        (operator.eq, Type.boolean),
-        (operator.ne, Type.boolean),
-        (operator.gt, Type.boolean),
-        (operator.ge, Type.boolean),
-        (operator.lt, Type.boolean),
-        (operator.le, Type.boolean),
+        (operator.add, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.sub, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.mul, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.truediv, Type.f32, warnings.catch_warnings(record=True)),
+        (operator.eq, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.ne, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.gt, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.ge, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.lt, Type.boolean, pytest.warns(DeprecationWarning)),
+        (operator.le, Type.boolean, pytest.warns(DeprecationWarning)),
     ],
 )
-def test_binary_operators_with_scalar(operator, expected_type):
+def test_binary_operators_with_scalar(operator, expected_type, warning_type):
     value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
 
     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
 
-    model = operator(parameter_a, value_b)
+    with warning_type:
+        model = operator(parameter_a, value_b)
 
     assert model.get_output_size() == 1
     assert list(model.get_output_shape(0)) == shape
diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py
index dfd1d2866c9253..62b1eac9da3865 100644
--- a/src/bindings/python/tests/test_runtime/test_model.py
+++ b/src/bindings/python/tests/test_runtime/test_model.py
@@ -80,7 +80,7 @@ def test_add_outputs(output, expectation, raise_msg):
         assert len(model.results) == 2
         assert "relu_t1" in model.outputs[1].get_tensor().names
         assert len(new_outs) == 1
-        assert new_outs[0].get_node() == model.outputs[1].get_node()
+        assert new_outs[0].get_node().get_instance_id() == model.outputs[1].get_node().get_instance_id()
         assert new_outs[0].get_index() == model.outputs[1].get_index()
     if e is not None:
         assert raise_msg in str(e.value)
@@ -97,7 +97,7 @@ def test_add_output_port():
     new_outs = model.add_outputs(relu1.output(0))
     assert len(model.results) == 2
     assert len(new_outs) == 1
-    assert new_outs[0].get_node() == model.outputs[1].get_node()
+    assert new_outs[0].get_node().get_instance_id() == model.outputs[1].get_node().get_instance_id()
     assert new_outs[0].get_index() == model.outputs[1].get_index()
 
 
@@ -117,9 +117,9 @@ def test_add_outputs_several_outputs(args):
     assert len(model.get_results()) == 3
     assert len(model.results) == 3
     assert len(new_outs) == 2
-    assert new_outs[0].get_node() == model.outputs[1].get_node()
+    assert new_outs[0].get_node().get_instance_id() == model.outputs[1].get_node().get_instance_id()
     assert new_outs[0].get_index() == model.outputs[1].get_index()
-    assert new_outs[1].get_node() == model.outputs[2].get_node()
+    assert new_outs[1].get_node().get_instance_id() == model.outputs[2].get_node().get_instance_id()
     assert new_outs[1].get_index() == model.outputs[2].get_index()
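
Usage sketch (illustration only, not part of the patch): with the wrappers above, the Node rich-comparison operators still build graph operations but now emit a DeprecationWarning, ops.equal and friends are the warning-free spelling, and node identity is compared through the new get_instance_id() binding rather than ==. The snippet assumes only the APIs touched in this diff (openvino.runtime.opset13, pytest.warns, Node.get_instance_id); the variable names are hypothetical.

    import numpy as np
    import pytest
    import openvino.runtime.opset13 as ops

    param = ops.parameter([2, 2], name="A", dtype=np.float32)
    data = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

    # `param == data` still builds an Equal node, but now emits the deprecation warning.
    with pytest.warns(DeprecationWarning):
        eq_node = param == data

    # Preferred, warning-free spelling.
    eq_node = ops.equal(param, data)

    # Since `==` on Node creates a graph op instead of comparing objects,
    # identity checks go through get_instance_id(), as in test_model.py above.
    assert eq_node.output(0).get_node().get_instance_id() == eq_node.get_instance_id()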