From 6deca48413d32f1cf17d29d3477d01888c2fcaaf Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Thu, 24 Aug 2023 12:59:49 +0400
Subject: [PATCH] Moved type prop tests to new API from g to z (#19353)

* Moved type prop tests to new API from g to z

* Fixed build
---
 src/core/tests/type_prop/abs.cpp | 2 +
 src/core/tests/type_prop/acos.cpp | 2 +
 src/core/tests/type_prop/acosh.cpp | 2 +
 src/core/tests/type_prop/arithmetic_ops.hpp | 382 ++++-----
 src/core/tests/type_prop/asin.cpp | 2 +
 src/core/tests/type_prop/asinh.cpp | 2 +
 src/core/tests/type_prop/atan.cpp | 2 +
 src/core/tests/type_prop/atanh.cpp | 2 +
 src/core/tests/type_prop/ceiling.cpp | 2 +
 src/core/tests/type_prop/cos.cpp | 2 +
 src/core/tests/type_prop/cosh.cpp | 2 +
 src/core/tests/type_prop/erf.cpp | 2 +
 src/core/tests/type_prop/exp.cpp | 2 +
 src/core/tests/type_prop/floor.cpp | 2 +
 src/core/tests/type_prop/gather.cpp | 332 +++----
 src/core/tests/type_prop/gather_elements.cpp | 3 +-
 src/core/tests/type_prop/gather_nd.cpp | 3 +-
 src/core/tests/type_prop/gather_tree.cpp | 24 +-
 src/core/tests/type_prop/gelu.cpp | 40 +-
 .../tests/type_prop/generate_proposals.cpp | 80 +-
 src/core/tests/type_prop/grid_sample.cpp | 3 +-
 src/core/tests/type_prop/grn.cpp | 16 +-
 .../tests/type_prop/group_convolution.cpp | 112 +--
 .../group_convolution_backprop_data.cpp | 203 ++---
 .../tests/type_prop/group_normalization.cpp | 3 +-
 src/core/tests/type_prop/gru_cell.cpp | 219 ++---
 src/core/tests/type_prop/gru_sequence.cpp | 155 ++--
 src/core/tests/type_prop/hard_sigmoid.cpp | 18 +-
 src/core/tests/type_prop/hsigmoid.cpp | 16 +-
 src/core/tests/type_prop/hswish.cpp | 16 +-
 src/core/tests/type_prop/idft.cpp | 64 +-
 src/core/tests/type_prop/if.cpp | 246 +++---
 src/core/tests/type_prop/interpolate.cpp | 304 +++----
 src/core/tests/type_prop/irdft.cpp | 63 +-
 src/core/tests/type_prop/is_finite.cpp | 3 +-
 src/core/tests/type_prop/is_inf.cpp | 3 +-
 src/core/tests/type_prop/is_nan.cpp | 3 +-
 src/core/tests/type_prop/log_softmax.cpp | 16 +-
 src/core/tests/type_prop/logical_and.cpp | 4 +-
 src/core/tests/type_prop/logical_not.cpp | 16 +-
 src/core/tests/type_prop/logical_ops.hpp | 73 +-
 src/core/tests/type_prop/logical_or.cpp | 4 +-
 src/core/tests/type_prop/logical_xor.cpp | 4 +-
 src/core/tests/type_prop/loop.cpp | 811 +++++++++---------
 src/core/tests/type_prop/lrn.cpp | 23 +-
 src/core/tests/type_prop/lstm_cell.cpp | 6 +-
 src/core/tests/type_prop/lstm_sequence.cpp | 8 +-
 src/core/tests/type_prop/matmul.cpp | 263 +++---
 src/core/tests/type_prop/max_pool.cpp | 45 +-
 src/core/tests/type_prop/mish.cpp | 27 +-
 src/core/tests/type_prop/multiclass_nms.cpp | 1 -
 src/core/tests/type_prop/mvn.cpp | 38 +-
 src/core/tests/type_prop/negative.cpp | 4 +-
 .../tests/type_prop/non_max_suppression.cpp | 1 -
 src/core/tests/type_prop/non_zero.cpp | 24 +-
 src/core/tests/type_prop/normalize_l2.cpp | 31 +-
 src/core/tests/type_prop/one_hot.cpp | 161 ++--
 src/core/tests/type_prop/pad.cpp | 153 ++--
 src/core/tests/type_prop/parameter.cpp | 14 +-
 src/core/tests/type_prop/prelu.cpp | 12 +-
 src/core/tests/type_prop/prior_box.cpp | 1 -
 .../tests/type_prop/prior_box_clustered.cpp | 1 -
 src/core/tests/type_prop/proposal.cpp | 366 ++++----
 src/core/tests/type_prop/psroi_pooling.cpp | 1 -
 src/core/tests/type_prop/random_uniform.cpp | 2 -
 src/core/tests/type_prop/range.cpp | 339 ++++----
 src/core/tests/type_prop/rdft.cpp | 59 +-
 src/core/tests/type_prop/read_value.cpp | 11 +-
 src/core/tests/type_prop/reduce_l1.cpp | 4 +-
 src/core/tests/type_prop/reduce_l2.cpp | 4 +-
 .../tests/type_prop/reduce_logical_and.cpp | 4 +-
 .../tests/type_prop/reduce_logical_or.cpp | 4 +-
 src/core/tests/type_prop/reduce_max.cpp | 10 +-
 src/core/tests/type_prop/reduce_mean.cpp | 4 +-
 src/core/tests/type_prop/reduce_min.cpp | 10 +-
 src/core/tests/type_prop/reduce_ops.hpp | 18 +-
 src/core/tests/type_prop/reduce_prod.cpp | 10 +-
 src/core/tests/type_prop/reduce_sum.cpp | 4 +-
 src/core/tests/type_prop/relu.cpp | 14 +-
 src/core/tests/type_prop/reshape.cpp | 471 +++++-----
 src/core/tests/type_prop/result.cpp | 28 +-
 src/core/tests/type_prop/reverse.cpp | 92 +-
 src/core/tests/type_prop/rnn_cell.cpp | 6 +-
 src/core/tests/type_prop/rnn_cell_base.cpp | 1 -
 src/core/tests/type_prop/rnn_sequence.cpp | 21 +-
 src/core/tests/type_prop/round.cpp | 29 +-
 .../type_prop/scatter_elements_update.cpp | 17 +-
 .../tests/type_prop/scatter_nd_update.cpp | 49 +-
 src/core/tests/type_prop/scatter_update.cpp | 83 +-
 src/core/tests/type_prop/select.cpp | 183 ++--
 src/core/tests/type_prop/selu.cpp | 94 +-
 src/core/tests/type_prop/shape_of.cpp | 37 +-
 src/core/tests/type_prop/shuffle_channels.cpp | 40 +-
 src/core/tests/type_prop/sigmoid.cpp | 21 +-
 src/core/tests/type_prop/sign.cpp | 4 +-
 src/core/tests/type_prop/sin.cpp | 4 +-
 src/core/tests/type_prop/sinh.cpp | 4 +-
 src/core/tests/type_prop/slice.cpp | 69 +-
 src/core/tests/type_prop/softmax.cpp | 29 +-
 src/core/tests/type_prop/softplus.cpp | 18 +-
 src/core/tests/type_prop/space_to_batch.cpp | 155 ++--
 src/core/tests/type_prop/space_to_depth.cpp | 84 +-
 src/core/tests/type_prop/split.cpp | 95 +-
 src/core/tests/type_prop/sqrt.cpp | 4 +-
 src/core/tests/type_prop/squeeze.cpp | 85 +-
 src/core/tests/type_prop/strided_slice.cpp | 206 ++---
 src/core/tests/type_prop/swish.cpp | 39 +-
 src/core/tests/type_prop/tan.cpp | 4 +-
 src/core/tests/type_prop/tanh.cpp | 4 +-
 src/core/tests/type_prop/tensor_iterator.cpp | 149 ++--
 src/core/tests/type_prop/tile.cpp | 75 +-
 src/core/tests/type_prop/transpose.cpp | 191 +++--
 .../tests/type_prop/unary_elementwise.cpp | 9 +-
 src/core/tests/type_prop/unary_ops.hpp | 31 +-
 src/core/tests/type_prop/unsqueeze.cpp | 51 +-
 src/core/tests/type_prop/variadic_split.cpp | 46 +-
 116 files changed, 3855 insertions(+), 3615 deletions(-)

diff --git a/src/core/tests/type_prop/abs.cpp b/src/core/tests/type_prop/abs.cpp
index 164b5548fe0ed7..83930f7f9b46e8 100644
--- a/src/core/tests/type_prop/abs.cpp
+++ b/src/core/tests/type_prop/abs.cpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/abs.hpp"
+
 #include "unary_ops.hpp"
 
 using Type = ::testing::Types;
diff --git a/src/core/tests/type_prop/acos.cpp b/src/core/tests/type_prop/acos.cpp
index 77b4aa60c61798..287307c9cf4d9e 100644
--- a/src/core/tests/type_prop/acos.cpp
+++ b/src/core/tests/type_prop/acos.cpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/acos.hpp"
+
 #include "unary_ops.hpp"
 
 using Type = ::testing::Types;
diff --git a/src/core/tests/type_prop/acosh.cpp b/src/core/tests/type_prop/acosh.cpp
index 14cf67e944ea3a..a198b17d6d3db1 100644
--- a/src/core/tests/type_prop/acosh.cpp
+++ b/src/core/tests/type_prop/acosh.cpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/acosh.hpp"
+
 #include "unary_ops.hpp"
 
 using Type = ::testing::Types;
diff --git a/src/core/tests/type_prop/arithmetic_ops.hpp b/src/core/tests/type_prop/arithmetic_ops.hpp
index 482d28e4c1f187..39d93a3c84a960 100644
--- a/src/core/tests/type_prop/arithmetic_ops.hpp
+++ b/src/core/tests/type_prop/arithmetic_ops.hpp
@@ -21,6 +21,7 @@
 #include "openvino/core/dimension_tracker.hpp"
#include "openvino/op/util/attr_types.hpp" +using namespace ov; using namespace testing; template @@ -29,12 +30,10 @@ class ArithmeticOperator : public testing::Test {}; TYPED_TEST_SUITE_P(ArithmeticOperator); TYPED_TEST_P(ArithmeticOperator, default_constructor) { - auto A = std::make_shared( - ov::element::f32, - ov::PartialShape{-1, 4, 1, 6, ov::Dimension(1, 6), ov::Dimension(2, 6)}); - auto B = std::make_shared( - ov::element::f32, - ov::PartialShape{-1, 1, 5, 6, ov::Dimension(5, 8), ov::Dimension(5, 8)}); + auto A = std::make_shared(element::f32, + PartialShape{-1, 4, 1, 6, Dimension(1, 6), Dimension(2, 6)}); + auto B = std::make_shared(element::f32, + PartialShape{-1, 1, 5, 6, Dimension(5, 8), Dimension(5, 8)}); const auto op = std::make_shared(); @@ -58,8 +57,8 @@ TYPED_TEST_P(ArithmeticOperator, default_constructor) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_2D) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2}); + auto A = std::make_shared(element::f32, Shape{2, 2}); + auto B = std::make_shared(element::f32, Shape{2, 2}); const auto op = std::make_shared(A, B); @@ -68,8 +67,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_2D) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_4D) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3}); + auto A = std::make_shared(element::f32, Shape{2, 2, 3, 3}); + auto B = std::make_shared(element::f32, Shape{2, 2, 3, 3}); const auto op = std::make_shared(A, B); @@ -78,8 +77,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_4D) { } TYPED_TEST_P(ArithmeticOperator, default_autobroadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2}); + auto A = std::make_shared(element::f32, Shape{2, 2}); + auto B = std::make_shared(element::f32, Shape{2, 2}); const auto op = std::make_shared(A, B); @@ -89,8 +88,8 @@ TYPED_TEST_P(ArithmeticOperator, default_autobroadcast) { } TYPED_TEST_P(ArithmeticOperator, no_autobroadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2}); + auto A = std::make_shared(element::f32, Shape{2, 2}); + auto B = std::make_shared(element::f32, Shape{2, 2}); const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::NONE); @@ -100,8 +99,8 @@ TYPED_TEST_P(ArithmeticOperator, no_autobroadcast) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_scalar_numpy_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{1}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{1}); const auto op = std::make_shared(A, B); @@ -110,8 +109,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_scalar_numpy_broadcast) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_1D_numpy_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{5}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{5}); const auto op = std::make_shared(A, B); @@ -120,8 +119,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_1D_numpy_broadcast) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_2D_x_4D_numpy_broadcast) { - auto A = 
std::make_shared(ov::element::f32, ov::Shape{4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); + auto A = std::make_shared(element::f32, Shape{4, 5}); + auto B = std::make_shared(element::f32, Shape{2, 3, 4, 5}); const auto op = std::make_shared(A, B); @@ -130,8 +129,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_2D_x_4D_numpy_broadcast) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_3D_x_4D_numpy_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{1, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 1, 1}); + auto A = std::make_shared(element::f32, Shape{1, 4, 5}); + auto B = std::make_shared(element::f32, Shape{2, 3, 1, 1}); const auto op = std::make_shared(A, B); @@ -140,8 +139,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_3D_x_4D_numpy_broadcast) { } TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_3D_numpy_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 1}); - auto B = std::make_shared(ov::element::f32, ov::Shape{7, 1, 5}); + auto A = std::make_shared(element::f32, Shape{8, 1, 6, 1}); + auto B = std::make_shared(element::f32, Shape{7, 1, 5}); const auto op = std::make_shared(A, B); @@ -152,8 +151,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_3D_numpy_broadcast) { TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{3, 4}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{3, 4}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1); const auto op = std::make_shared(A, B, autob); @@ -163,8 +162,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{3, 1}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1); const auto op = std::make_shared(A, B, autob); @@ -174,8 +173,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD); const auto op = std::make_shared(A, B, autob); @@ -185,8 +184,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{5}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{5}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 3); const auto op = std::make_shared(A, B, autob); @@ -196,8 +195,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = 
std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{1, 3}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{1, 3}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0); const auto op = std::make_shared(A, B, autob); @@ -207,8 +206,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1, 5}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{3, 1, 5}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1); const auto op = std::make_shared(A, B, autob); @@ -221,8 +220,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) { TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) { { - auto A = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5}); + auto A = std::make_shared(element::f32, Shape{8, 1, 6, 5}); + auto B = std::make_shared(element::f32, Shape{8, 1, 6, 5}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD); const auto op = std::make_shared(A, B, autob); @@ -232,8 +231,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD); } { - auto A = std::make_shared(ov::element::f32, ov::Shape{8, 7, 6, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5}); + auto A = std::make_shared(element::f32, Shape{8, 7, 6, 5}); + auto B = std::make_shared(element::f32, Shape{8, 1, 6, 5}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD); const auto op = std::make_shared(A, B, autob); @@ -245,8 +244,8 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) } TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{8, 7, 6, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{7, 1, 5}); + auto A = std::make_shared(element::f32, Shape{8, 7, 6, 5}); + auto B = std::make_shared(element::f32, Shape{7, 1, 5}); const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::PDPD); @@ -256,43 +255,43 @@ TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_3D_ax_default_pdpd_ } TYPED_TEST_P(ArithmeticOperator, incompatible_element_types) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3}); - auto B = std::make_shared(ov::element::i32, ov::Shape{2, 2, 3, 3}); + auto A = std::make_shared(element::f32, Shape{2, 2, 3, 3}); + auto B = std::make_shared(element::i32, Shape{2, 2, 3, 3}); ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure); } TYPED_TEST_P(ArithmeticOperator, incompatible_boolean_type) { - auto A = std::make_shared(ov::element::boolean, ov::Shape{2, 2, 3, 3}); - auto B = std::make_shared(ov::element::boolean, ov::Shape{2, 2, 3, 3}); + auto A = std::make_shared(element::boolean, Shape{2, 2, 3, 3}); + auto B = std::make_shared(element::boolean, Shape{2, 2, 3, 3}); ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure); } TYPED_TEST_P(ArithmeticOperator, 
shape_inference_1D_x_1D_incompatible) { - auto A = std::make_shared(ov::element::f32, ov::Shape{3}); - auto B = std::make_shared(ov::element::f32, ov::Shape{4}); + auto A = std::make_shared(element::f32, Shape{3}); + auto B = std::make_shared(element::f32, Shape{4}); ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure); } TYPED_TEST_P(ArithmeticOperator, shape_inference_3D_x_3D_incompatible) { - auto A = std::make_shared(ov::element::f32, ov::Shape{3, 5, 6}); - auto B = std::make_shared(ov::element::f32, ov::Shape{4, 10, 12}); + auto A = std::make_shared(element::f32, Shape{3, 5, 6}); + auto B = std::make_shared(element::f32, Shape{4, 10, 12}); ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure); } TYPED_TEST_P(ArithmeticOperator, shape_inference_5D_x_5D_incompatible) { - auto A = std::make_shared(ov::element::f32, ov::Shape{389, 112, 12}); - auto B = std::make_shared(ov::element::f32, ov::Shape{389, 112, 19}); + auto A = std::make_shared(element::f32, Shape{389, 112, 12}); + auto B = std::make_shared(element::f32, Shape{389, 112, 19}); ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure); } TYPED_TEST_P(ArithmeticOperator, shape_inference_axis_less_than_negative_1_pdpd_incompatible) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); - auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5}); + auto B = std::make_shared(element::f32, Shape{3, 1}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -2); @@ -300,8 +299,8 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_axis_less_than_negative_1_pdpd_ } TYPED_TEST_P(ArithmeticOperator, shape_inference_dst_smaller_than_src_pdpd_broadcast) { - auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 1}); - auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); + auto A = std::make_shared(element::f32, Shape{2, 3, 4, 1}); + auto B = std::make_shared(element::f32, Shape{2, 3, 4, 5}); const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD); @@ -309,33 +308,36 @@ TYPED_TEST_P(ArithmeticOperator, shape_inference_dst_smaller_than_src_pdpd_broad } TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_numpy) { - auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY); + auto param = std::make_shared(element::f32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY); + const auto op = std::make_shared(param, param, autob); EXPECT_EQ(op->get_element_type(), ov::element::f32); EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_none) { - auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NONE); + auto param = std::make_shared(element::f32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE); + const auto op = std::make_shared(param, param, autob); EXPECT_EQ(op->get_element_type(), ov::element::f32); EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_pdpd) { - auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - 
const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD); + auto param = std::make_shared(element::f32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + const auto op = std::make_shared(param, param, autob); EXPECT_EQ(op->get_element_type(), ov::element::f32); EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(ArithmeticOperator, dynamic_shape_3D) { - ov::Dimension dynamic = ov::Dimension::dynamic(); - auto A = std::make_shared(ov::element::f32, ov::PartialShape{dynamic, dynamic, 6}); - auto B = std::make_shared(ov::element::f32, ov::PartialShape{dynamic, dynamic, 6}); + Dimension dynamic = Dimension::dynamic(); + auto A = std::make_shared(element::f32, PartialShape{dynamic, dynamic, 6}); + auto B = std::make_shared(element::f32, PartialShape{dynamic, dynamic, 6}); const auto op = std::make_shared(A, B); @@ -344,11 +346,9 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_3D) { } TYPED_TEST_P(ArithmeticOperator, dynamic_shape_5D) { - ov::Dimension dynamic = ov::Dimension::dynamic(); - auto A = - std::make_shared(ov::element::f32, ov::PartialShape{dynamic, 4, dynamic, dynamic, 6}); - auto B = - std::make_shared(ov::element::f32, ov::PartialShape{dynamic, 4, dynamic, dynamic, 6}); + Dimension dynamic = Dimension::dynamic(); + auto A = std::make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); + auto B = std::make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); const auto op = std::make_shared(A, B); @@ -358,11 +358,11 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_5D) { TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_none) { auto A = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), ov::Dimension(6, -1), ov::Dimension(-1, 6), -1, 8}); + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(6, -1), Dimension(-1, 6), -1, 8}); auto B = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), ov::Dimension(6, -1), ov::Dimension(-1, 6), -1, 8}); + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(6, -1), Dimension(-1, 6), -1, 8}); const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::NONE); @@ -378,26 +378,12 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_none) { TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_equal_rank_broadcast_numpy) { // Equal rank - auto A = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(1, 3), - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - -1, - 1, - 3}); - auto B = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - -1, - 1, - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - 3}); + auto A = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(1, 3), Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3}); + auto B = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3}); const auto op = std::make_shared(A, B); @@ -416,19 +402,11 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_equal_rank_broadcast_nu TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_a_rank_smaller_broadcast_numpy) { // `A` rank smaller - auto A = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), 
ov::Dimension(4, 8), -1, 1, -1, 1, 3}); - auto B = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - -1, - 1, - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - 3}); + auto A = std::make_shared(element::f32, + PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3}); + auto B = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3}); const auto op = std::make_shared(A, B); @@ -447,19 +425,11 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_a_rank_smaller_broadcas TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_b_rank_smaller_broadcast_numpy) { // `B` rank smaller - auto A = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - -1, - 1, - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - 3}); - auto B = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(4, 8), -1, 1, -1, 1, 3}); + auto A = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3}); + auto B = std::make_shared(element::f32, + PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3}); const auto op = std::make_shared(A, B); @@ -478,19 +448,16 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_b_rank_smaller_broadcas TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_pdpd) { { // Equal rank - auto A = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - ov::Dimension(1, 6), - /* Dimension(6, -1), */ -1, - 8}); - auto B = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), 1, /* 1, */ -1, 8}); + auto A = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(1, 6), /* Dimension(6, -1), */ -1, 8}); + auto B = + std::make_shared(element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), 1, /* 1, */ -1, 8}); - const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::PDPD); + const auto op = std::make_shared(A, B, op::AutoBroadcastType::PDPD); - EXPECT_EQ(op->get_element_type(), ov::element::f32); + EXPECT_EQ(op->get_element_type(), element::f32); EXPECT_EQ(op->get_output_partial_shape(0), (ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), @@ -499,28 +466,14 @@ TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_pdpd) { 8})); } { // `A` rank smaller - auto A = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(1, 3), - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - -1, - 1, - 3}); - auto B = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - -1, - 1, - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - 3}); + auto A = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(1, 3), Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3}); + auto B = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3}); - const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0); const auto op = std::make_shared(A, B, autob); EXPECT_EQ(op->get_element_type(), ov::element::f32); @@ -536,19 +489,12 @@ 
TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_pdpd) { 3})); } { // `B` rank smaller - auto A = std::make_shared(ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), - ov::Dimension(2, 7), - -1, - 1, - ov::Dimension(1, 3), - ov::Dimension(4, 8), - -1, - 1, - 3}); - auto B = std::make_shared( - ov::element::f32, - ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(4, 8), -1, 1, -1, 1, 3}); + auto A = std::make_shared( + element::f32, + PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3}); + auto B = + std::make_shared(element::f32, + PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3}); const auto op = std::make_shared(A, B); @@ -576,8 +522,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_a_dynamic_mixed_dims_broadcast_numpy) { set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(expected_shape, {10, 11, 0, 13}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -596,8 +543,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_b_dynamic_mixed_dims_broadcast_numpy) { set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 22, 0}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -617,8 +565,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_mixed_dims_broadcast_ set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {0, 21, 22, 13}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -639,8 +588,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_b_and_fully_dyn_a_bro ov::PartialShape expected_shape = {ov::Dimension(2, 4), 3, 224, 224}; ov::TensorLabel expected_labels{20, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -661,8 +611,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_a_and_fully_dyn_b_bro ov::PartialShape expected_shape = {ov::Dimension(2, 4), 3, 224, 224}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -682,8 +633,9 @@ 
TYPED_TEST_P(ArithmeticOperator, labels_equal_interval_dims_without_one_broadcas set_shape_labels(pshape_B, {10, 11, 12, 13}); set_shape_labels(expected_shape, {10, 11, 12, 13}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -703,8 +655,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_dims_without_one_broa set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(pshape_B, {20, 21, 22, 23}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -726,8 +679,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_batch_without_one_equ ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -755,8 +709,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_fully_dynamic_batch_broadcast_ ov::PartialShape expected_shape = {-1, 3, 224, 224}; ov::TensorLabel expected_labels{0, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -777,8 +732,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_fully_dynamic_batch_broadcast_nump ov::PartialShape expected_shape = {-1, 3, 224, 224}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -795,8 +751,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_a_broadcast_numpy) { ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f64, A); - auto param_B = std::make_shared(ov::element::f64, B); + auto param_A = std::make_shared(element::f64, A); + auto param_B = std::make_shared(element::f64, B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -813,8 +770,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_b_broadcast_numpy) { ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f64, A); - auto param_B = std::make_shared(ov::element::f64, B); + auto param_A = std::make_shared(element::f64, A); + auto param_B = std::make_shared(element::f64, B); + const auto 
op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -833,8 +791,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_a_broadcast_nu ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f64, pshape_A); - auto param_B = std::make_shared(ov::element::f64, pshape_B); + auto param_A = std::make_shared(element::f64, pshape_A); + auto param_B = std::make_shared(element::f64, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -853,8 +812,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_b_broadcast_nu ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(ov::element::f64, pshape_A); - auto param_B = std::make_shared(ov::element::f64, pshape_B); + auto param_A = std::make_shared(element::f64, pshape_A); + auto param_B = std::make_shared(element::f64, pshape_B); + const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -874,9 +834,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_numpy) set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 12, 23}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NUMPY); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NUMPY); const auto out_shape = op->get_output_partial_shape(0); @@ -895,9 +855,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_numpy) { set_shape_labels(pshape_B, {30, 31, 32, 33}); set_shape_labels(expected_shape, {30, 31, 32, 33}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NUMPY); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NUMPY); const auto out_shape = op->get_output_partial_shape(0); @@ -916,9 +876,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_none) { set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 22, 23}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); auto out_shape = op->get_output_partial_shape(0); @@ -937,9 +897,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_none) { set_shape_labels(pshape_B, {30, 31, 32, 33}); set_shape_labels(expected_shape, {30, 31, 32, 33}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE); + auto param_A = 
std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); auto out_shape = op->get_output_partial_shape(0); @@ -958,9 +918,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_dynamic_shape_broadcast_none) set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 22, 23}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); const auto out_shape = op->get_output_partial_shape(0); @@ -979,9 +939,9 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_dynamic_shape_broadcast_none) { set_shape_labels(pshape_B, {30, 31, 32, 33}); set_shape_labels(expected_shape, {30, 31, 32, 33}); - auto param_A = std::make_shared(ov::element::f32, pshape_A); - auto param_B = std::make_shared(ov::element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE); + auto param_A = std::make_shared(element::f32, pshape_A); + auto param_B = std::make_shared(element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); const auto out_shape = op->get_output_partial_shape(0); diff --git a/src/core/tests/type_prop/asin.cpp b/src/core/tests/type_prop/asin.cpp index 7ee29e86bcfdbd..0dff86e512733f 100644 --- a/src/core/tests/type_prop/asin.cpp +++ b/src/core/tests/type_prop/asin.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/asin.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/asinh.cpp b/src/core/tests/type_prop/asinh.cpp index 52bf0fb6e8e6ba..c415c6a3c8e20e 100644 --- a/src/core/tests/type_prop/asinh.cpp +++ b/src/core/tests/type_prop/asinh.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/asinh.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/atan.cpp b/src/core/tests/type_prop/atan.cpp index ecc8d1b8ee1577..2ef5a87d4bac66 100644 --- a/src/core/tests/type_prop/atan.cpp +++ b/src/core/tests/type_prop/atan.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/atan.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/atanh.cpp b/src/core/tests/type_prop/atanh.cpp index 1242d761bfbe13..2b0ab5d70114af 100644 --- a/src/core/tests/type_prop/atanh.cpp +++ b/src/core/tests/type_prop/atanh.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/atanh.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/ceiling.cpp b/src/core/tests/type_prop/ceiling.cpp index b9425a9fb14dca..af7b7a5bfc5f78 100644 --- a/src/core/tests/type_prop/ceiling.cpp +++ b/src/core/tests/type_prop/ceiling.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/ceiling.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/cos.cpp b/src/core/tests/type_prop/cos.cpp index 3b23d69397f827..57023bbbc7460c 100644 --- a/src/core/tests/type_prop/cos.cpp +++ 
b/src/core/tests/type_prop/cos.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/cos.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/cosh.cpp b/src/core/tests/type_prop/cosh.cpp index 7c85f9a03e453c..9af935ddc6e08f 100644 --- a/src/core/tests/type_prop/cosh.cpp +++ b/src/core/tests/type_prop/cosh.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/cosh.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/erf.cpp b/src/core/tests/type_prop/erf.cpp index 20696b4241f59c..90e17b85d7fd20 100644 --- a/src/core/tests/type_prop/erf.cpp +++ b/src/core/tests/type_prop/erf.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/erf.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/exp.cpp b/src/core/tests/type_prop/exp.cpp index c1188c53bc48cf..2d61f9d1ead7bc 100644 --- a/src/core/tests/type_prop/exp.cpp +++ b/src/core/tests/type_prop/exp.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/exp.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/floor.cpp b/src/core/tests/type_prop/floor.cpp index 68c08afbe8bc2d..d7b1c99a01f8ff 100644 --- a/src/core/tests/type_prop/floor.cpp +++ b/src/core/tests/type_prop/floor.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/floor.hpp" + #include "unary_ops.hpp" using Type = ::testing::Types; diff --git a/src/core/tests/type_prop/gather.cpp b/src/core/tests/type_prop/gather.cpp index 619af60c2fb85b..9a8aa5654a95e0 100644 --- a/src/core/tests/type_prop/gather.cpp +++ b/src/core/tests/type_prop/gather.cpp @@ -2,15 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gather.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/shape_of.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; // ------------------------------ V1 ------------------------------ @@ -19,9 +23,9 @@ TEST(type_prop, gather_v1_axis_0) { Shape params_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::f32, params_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto G = make_shared(P, I, A); EXPECT_EQ(G->get_element_type(), element::f32); EXPECT_EQ(G->get_shape(), out_shape); @@ -34,9 +38,9 @@ TEST(type_prop, gather_v1_uint8) { PartialShape indices_shape{2, 2}; PartialShape out_shape{2, 2, 2}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::u8, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::u8, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto G = make_shared(D, I, A); 
EXPECT_EQ(G->get_element_type(), element::f32); @@ -50,9 +54,9 @@ TEST(type_prop, gather_v1_float32) { PartialShape indices_shape{2, 2}; PartialShape out_shape{2, 2, 2}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::f32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::f32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_element_type(), element::f32); @@ -64,9 +68,9 @@ TEST(type_prop, gather_axis_1) { Shape params_shape{3, 3}; Shape indices_shape{1, 2}; Shape out_shape{3, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto P = make_shared(element::f32, params_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); auto G = make_shared(P, I, A); EXPECT_EQ(G->get_element_type(), element::f32); EXPECT_EQ(G->get_shape(), out_shape); @@ -74,9 +78,9 @@ TEST(type_prop, gather_axis_1) { } TEST(type_prop, gather_v1_incorrect_axis_shape) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{2}); + auto params = make_shared(element::f32, Shape{5, 6}); + auto indices = make_shared(element::i64, Shape{4}); + auto axis = make_shared(element::i64, Shape{2}); OV_EXPECT_THROW(auto g = make_shared(params, indices, axis), NodeValidationFailure, @@ -84,9 +88,9 @@ TEST(type_prop, gather_v1_incorrect_axis_shape) { } TEST(type_prop, gather_v1_axis_out_of_input_rank) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{1}, vector{2}); + auto params = make_shared(element::f32, Shape{5, 6}); + auto indices = make_shared(element::i64, Shape{4}); + auto axis = make_shared(element::i64, Shape{1}, vector{2}); OV_EXPECT_THROW(auto g = make_shared(params, indices, axis), ov::AssertFailure, @@ -94,10 +98,10 @@ TEST(type_prop, gather_v1_axis_out_of_input_rank) { } TEST(type_prop, gather_v1_negative_axis) { - auto params = make_shared(element::f32, Shape{5, 6, 7}); - auto indices = make_shared(element::i64, Shape{4}); + auto params = make_shared(element::f32, Shape{5, 6, 7}); + auto indices = make_shared(element::i64, Shape{4}); int64_t axis = -2; - auto axis_node = make_shared(element::i64, Shape{1}, vector{axis}); + auto axis_node = make_shared(element::i64, Shape{1}, vector{axis}); auto gather_v1 = make_shared(params, indices, axis_node); EXPECT_EQ(gather_v1->get_axis(), 1); } @@ -107,9 +111,9 @@ TEST(type_prop, gather_1_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector zero{0}; @@ -125,21 +129,21 @@ TEST(type_prop, gather_1_dynamic_value_and_label_propagation) { } TEST(type_prop, dynamic_value_propagation) { - auto param = make_shared(element::f32, 
PartialShape{-1, 3, -1, -1}); + auto param = make_shared(element::f32, PartialShape{-1, 3, -1, -1}); auto shape_of = std::make_shared(param, element::i32); - auto indices = op::Constant::create(element::i32, {}, {1}); - auto axis = op::Constant::create(element::i32, {}, {0}); + auto indices = ov::op::v0::Constant::create(element::i32, {}, {1}); + auto axis = ov::op::v0::Constant::create(element::i32, {}, {0}); auto gather = std::make_shared(shape_of, indices, axis); - auto add = std::make_shared(gather, op::Constant::create(element::i32, {}, {0})); + auto add = std::make_shared(gather, ov::op::v0::Constant::create(element::i32, {}, {0})); - auto range = std::make_shared(op::Constant::create(element::i32, {}, {0}), + auto range = std::make_shared(ov::op::v0::Constant::create(element::i32, {}, {0}), add, - op::Constant::create(element::i32, {}, {1}), + ov::op::v0::Constant::create(element::i32, {}, {1}), element::i64); - auto RIC = std::make_shared(param, range, op::Constant::create(element::i32, {}, {1})); + auto RIC = std::make_shared(param, range, ov::op::v0::Constant::create(element::i32, {}, {1})); EXPECT_EQ(RIC->get_element_type(), element::f32); EXPECT_EQ(RIC->get_output_partial_shape(0), (PartialShape{-1, 3, -1, -1})); @@ -153,9 +157,9 @@ TEST(type_prop, gather_7_axis_0) { PartialShape out_shape{2, 2, 2}; int64_t batch_dims = 0; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -169,9 +173,9 @@ TEST(type_prop, gather_7_axis_1) { PartialShape out_shape{3, 1, 2}; int64_t axis = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_element_type(), element::f32); @@ -185,9 +189,9 @@ TEST(type_prop, gather_7_negative_axis) { PartialShape out_shape{5, 4, 7}; int64_t axis = -2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_axis(), 1); @@ -201,9 +205,9 @@ TEST(type_prop, gather_7_dynamic_pshape_batch_dims_1_axis_1) { int64_t axis = 1; int64_t batch_dims = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -217,9 +221,9 @@ TEST(type_prop, gather_7_dynamic_pshape_batch_dims_1_axis_3) { int64_t axis = 3; int64_t batch_dims = 1; - auto D = make_shared(element::f32, data_shape); - auto I = 
make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -233,9 +237,9 @@ TEST(type_prop, gather_7_dynamic_2d_pshape_batch_dim) { int64_t axis = 2; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -249,9 +253,9 @@ TEST(type_prop, gather_7_dynamic_2d_pshape_batch_dim_axis_3) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -265,9 +269,9 @@ TEST(type_prop, gather_7_dynamic_rank) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -281,9 +285,9 @@ TEST(type_prop, gather_7_axis_boundcheck_for_dynamic_data_rank) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -297,9 +301,9 @@ TEST(type_prop, gather_7_dynamic_rank_negative_batch_dims) { int64_t axis = 3; int64_t batch_dims = -2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -312,9 +316,9 @@ TEST(type_prop, gather_7_axis_not_set) { // default batch_dims = 0 PartialShape out_shape = PartialShape::dynamic(5); // out_rank = data_rank + indices_rank - 1 - batch_dims - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i32, Shape{1}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i32, 
Shape{1}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_element_type(), element::f32); @@ -327,9 +331,9 @@ TEST(type_prop, gather_7_axis_not_set_positive_batch_dims) { int64_t batch_dims = 1; PartialShape out_shape = PartialShape({2, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i32, Shape{1}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i32, Shape{1}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -341,9 +345,9 @@ TEST(type_prop, gather_7_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector zero{0}; @@ -361,9 +365,9 @@ TEST(type_prop, gather_7_dynamic_value_and_label_propagation) { // --------------------- V7 Negative tests ------------------------------ TEST(type_prop, gather_7_incorrect_axis_shape) { - auto D = make_shared(element::f32, Shape{5, 6}); - auto I = make_shared(element::i64, Shape{4}); - auto A = make_shared(element::i64, Shape{2}); + auto D = make_shared(element::f32, Shape{5, 6}); + auto I = make_shared(element::i64, Shape{4}); + auto A = make_shared(element::i64, Shape{2}); OV_EXPECT_THROW(auto g = make_shared(D, I, A), NodeValidationFailure, @@ -371,9 +375,9 @@ TEST(type_prop, gather_7_incorrect_axis_shape) { } TEST(type_prop, gather_7_axis_out_of_input_rank) { - auto D = make_shared(element::f32, Shape{5, 6}); - auto I = make_shared(element::i64, Shape{4}); - auto A = make_shared(element::i64, Shape{1}, vector{2}); + auto D = make_shared(element::f32, Shape{5, 6}); + auto I = make_shared(element::i64, Shape{4}); + auto A = make_shared(element::i64, Shape{1}, vector{2}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -385,10 +389,10 @@ TEST(type_prop, gather_7_dynamic_batch_dims_inconsistent) { PartialShape data_shape{Dimension(1, 7), 20, 20}; PartialShape indices_shape{Dimension(8, 10), 3, 8}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 1; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 1; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -400,10 +404,10 @@ TEST(type_prop, gather_7_batch_dims_less_check) { PartialShape data_shape{1, 3, 20}; PartialShape indices_shape{1, 3, 8}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 1; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 2; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -415,10 +419,10 
@@ TEST(type_prop, gather_7_batch_dims_less_indices_rank_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 4; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 3; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -430,10 +434,10 @@ TEST(type_prop, gather_7_indices_type_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::f32, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::f32, indices_shape); int64_t axis = 4; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -445,10 +449,10 @@ TEST(type_prop, gather_7_axis_type_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); int64_t axis = 4; - auto A = make_shared(element::f32, Shape{1}, vector{axis}); + auto A = make_shared(element::f32, Shape{1}, vector{axis}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -464,9 +468,9 @@ TEST(type_prop, gather_v8_axis_0) { PartialShape out_shape{2, 2, 2}; int64_t batch_dims = 0; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -480,9 +484,9 @@ TEST(type_prop, gather_v8_axis_1) { PartialShape out_shape{3, 1, 2}; int64_t axis = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); + auto A = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_element_type(), element::f32); @@ -496,9 +500,9 @@ TEST(type_prop, gather_v8_negative_axis) { PartialShape out_shape{5, 4, 7}; int64_t axis = -2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_axis(), 1); @@ -512,9 +516,9 @@ TEST(type_prop, gather_v8_dynamic_pshape_batch_dims_1_axis_1) { int64_t axis = 1; int64_t batch_dims = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = 
make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -528,9 +532,9 @@ TEST(type_prop, gather_v8_dynamic_pshape_batch_dims_1_axis_3) { int64_t axis = 3; int64_t batch_dims = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -544,9 +548,9 @@ TEST(type_prop, gather_v8_dim_no_bound_pshape_batch_dims_1_axis_3) { int64_t axis = 3; int64_t batch_dims = 1; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -560,9 +564,9 @@ TEST(type_prop, gather_v8_dynamic_2d_pshape_batch_dim) { int64_t axis = 2; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -576,9 +580,9 @@ TEST(type_prop, gather_v8_dynamic_2d_pshape_batch_dim_axis_3) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -592,9 +596,9 @@ TEST(type_prop, gather_v8_dynamic_rank) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -608,9 +612,9 @@ TEST(type_prop, gather_v8_axis_boundcheck_for_dynamic_data_rank) { int64_t axis = 3; int64_t batch_dims = 2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), 
element::f32); @@ -624,9 +628,9 @@ TEST(type_prop, gather_v8_dynamic_rank_negative_batch_dims) { int64_t axis = 3; int64_t batch_dims = -2; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -639,9 +643,9 @@ TEST(type_prop, gather_v8_axis_not_set) { // default batch_dims = 0 PartialShape out_shape = PartialShape::dynamic(5); // out_rank = data_rank + indices_rank - 1 - batch_dims - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i32, Shape{1}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i32, Shape{1}); auto G = make_shared(D, I, A); EXPECT_EQ(G->get_element_type(), element::f32); @@ -654,9 +658,9 @@ TEST(type_prop, gather_v8_axis_not_set_positive_batch_dims) { int64_t batch_dims = 1; PartialShape out_shape = PartialShape({2, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = make_shared(element::i32, Shape{1}); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); + auto A = make_shared(element::i32, Shape{1}); auto G = make_shared(D, I, A, batch_dims); EXPECT_EQ(G->get_element_type(), element::f32); @@ -669,9 +673,9 @@ TEST(type_prop, gather_v8_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector zero{0}; @@ -692,9 +696,9 @@ TEST(type_prop, gather_v8_dynamic_value_and_label_propagation_interval_dim) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector zero{0}; @@ -710,9 +714,9 @@ TEST(type_prop, gather_v8_dynamic_value_and_label_propagation_interval_dim) { } TEST(type_prop, gather_v8_use_default_ctor) { - auto D = make_shared(element::f32, PartialShape{2, 1, 200, 400}); - auto I = make_shared(element::i64, PartialShape{2, 2}); - auto A = make_shared(element::i64, Shape{1}, vector{-1}); + auto D = make_shared(element::f32, PartialShape{2, 1, 200, 400}); + auto I = make_shared(element::i64, PartialShape{2, 2}); + auto A = make_shared(element::i64, Shape{1}, vector{-1}); constexpr int64_t batch_dims = 1; auto G = make_shared(); @@ -729,9 +733,9 @@ TEST(type_prop, gather_v8_use_default_ctor) { // --------------------- V8 Negative tests 
------------------------------ TEST(type_prop, gather_v8_incorrect_axis_shape) { - auto D = make_shared(element::f32, Shape{5, 6}); - auto I = make_shared(element::i64, Shape{4}); - auto A = make_shared(element::i64, Shape{2}); + auto D = make_shared(element::f32, Shape{5, 6}); + auto I = make_shared(element::i64, Shape{4}); + auto A = make_shared(element::i64, Shape{2}); OV_EXPECT_THROW(auto g = make_shared(D, I, A), NodeValidationFailure, @@ -739,9 +743,9 @@ TEST(type_prop, gather_v8_incorrect_axis_shape) { } TEST(type_prop, gather_v8_axis_out_of_input_rank) { - auto D = make_shared(element::f32, Shape{5, 6}); - auto I = make_shared(element::i64, Shape{4}); - auto A = make_shared(element::i64, Shape{1}, vector{2}); + auto D = make_shared(element::f32, Shape{5, 6}); + auto I = make_shared(element::i64, Shape{4}); + auto A = make_shared(element::i64, Shape{1}, vector{2}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -753,10 +757,10 @@ TEST(type_prop, gather_v8_dynamic_batch_dims_inconsistent) { PartialShape data_shape{Dimension(1, 7), 20, 20}; PartialShape indices_shape{Dimension(8, 10), 3, 8}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 1; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 1; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -768,10 +772,10 @@ TEST(type_prop, gather_v8_batch_dims_less_check) { PartialShape data_shape{1, 3, 20}; PartialShape indices_shape{1, 3, 8}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 1; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 2; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -783,10 +787,10 @@ TEST(type_prop, gather_v8_batch_dims_less_indices_rank_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::i64, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i64, indices_shape); int64_t axis = 4; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 3; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -798,10 +802,10 @@ TEST(type_prop, gather_v8_indices_type_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = make_shared(element::f32, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::f32, indices_shape); int64_t axis = 4; - auto A = make_shared(element::i64, Shape{1}, vector{axis}); + auto A = make_shared(element::i64, Shape{1}, vector{axis}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), @@ -813,10 +817,10 @@ TEST(type_prop, gather_v8_axis_type_check) { PartialShape data_shape{1, 20, 20, 22, 22}; PartialShape indices_shape{1, 3}; - auto D = make_shared(element::f32, data_shape); - auto I = 
make_shared(element::i32, indices_shape); + auto D = make_shared(element::f32, data_shape); + auto I = make_shared(element::i32, indices_shape); int64_t axis = 4; - auto A = make_shared(element::f32, Shape{1}, vector{axis}); + auto A = make_shared(element::f32, Shape{1}, vector{axis}); int64_t batch_dims = 0; OV_EXPECT_THROW(auto g = make_shared(D, I, A, batch_dims), diff --git a/src/core/tests/type_prop/gather_elements.cpp b/src/core/tests/type_prop/gather_elements.cpp index c0f768e659b96c..fcd72878db6adb 100644 --- a/src/core/tests/type_prop/gather_elements.cpp +++ b/src/core/tests/type_prop/gather_elements.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/ops.hpp" diff --git a/src/core/tests/type_prop/gather_nd.cpp b/src/core/tests/type_prop/gather_nd.cpp index 4d83564cdbe250..250aba9121e302 100644 --- a/src/core/tests/type_prop/gather_nd.cpp +++ b/src/core/tests/type_prop/gather_nd.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/ops.hpp" diff --git a/src/core/tests/type_prop/gather_tree.cpp b/src/core/tests/type_prop/gather_tree.cpp index 7077c4a74fd286..9cc618da9d9648 100644 --- a/src/core/tests/type_prop/gather_tree.cpp +++ b/src/core/tests/type_prop/gather_tree.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include #include #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/ops.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; namespace { @@ -32,12 +32,14 @@ std::shared_ptr makeGatherTreeOp(const GatherTreeInputParams& p) { if (p.size() != gather_tree_required_inputs) { throw runtime_error("GatherTree requires 4 inputs"); } - auto step_ids = make_shared(p.at(step_ids_input_idx).in_et, p.at(step_ids_input_idx).in_pshape); + auto step_ids = + make_shared(p.at(step_ids_input_idx).in_et, p.at(step_ids_input_idx).in_pshape); auto parent_idx = - make_shared(p.at(parent_idx_input_idx).in_et, p.at(parent_idx_input_idx).in_pshape); + make_shared(p.at(parent_idx_input_idx).in_et, p.at(parent_idx_input_idx).in_pshape); auto max_seq_len = - make_shared(p.at(max_seq_len_input_idx).in_et, p.at(max_seq_len_input_idx).in_pshape); - auto end_token = make_shared(p.at(end_token_input_idx).in_et, p.at(end_token_input_idx).in_pshape); + make_shared(p.at(max_seq_len_input_idx).in_et, p.at(max_seq_len_input_idx).in_pshape); + auto end_token = + make_shared(p.at(end_token_input_idx).in_et, p.at(end_token_input_idx).in_pshape); return make_shared(step_ids, parent_idx, max_seq_len, end_token); } } // namespace @@ -45,10 +47,10 @@ std::shared_ptr makeGatherTreeOp(const GatherTreeInputParams& p) { TEST(type_prop, gather_tree_default_constructor) { auto op = std::make_shared(); - auto step_ids = std::make_shared(element::i32, PartialShape{2, 4, 3}); - auto parent_idx = std::make_shared(element::i32, PartialShape{2, 4, 3}); - auto max_seq_len = std::make_shared(element::i32, PartialShape{4}); - auto end_token = std::make_shared(element::i32, PartialShape{}); + auto step_ids = std::make_shared(element::i32, PartialShape{2, 4, 3}); + auto parent_idx = std::make_shared(element::i32, PartialShape{2, 4, 
3}); + auto max_seq_len = std::make_shared(element::i32, PartialShape{4}); + auto end_token = std::make_shared(element::i32, PartialShape{}); op->set_argument(0, step_ids); op->set_argument(1, parent_idx); diff --git a/src/core/tests/type_prop/gelu.cpp b/src/core/tests/type_prop/gelu.cpp index 2aae4de5d7711d..3bbfe70aa63b07 100644 --- a/src/core/tests/type_prop/gelu.cpp +++ b/src/core/tests/type_prop/gelu.cpp @@ -2,17 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gelu.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; // ------------------------------ V0 ------------------------------ TEST(type_prop, gelu_v0) { const PartialShape param_shape{64, Dimension::dynamic(), 256, Dimension(4, 8)}; - const auto param = std::make_shared(element::f32, param_shape); + const auto param = std::make_shared(element::f32, param_shape); const auto op = std::make_shared(param); ASSERT_EQ(op->get_element_type(), element::f32); ASSERT_EQ(op->get_output_partial_shape(0), param_shape); @@ -20,7 +22,7 @@ TEST(type_prop, gelu_v0) { // ------------------------------ V7 ------------------------------ TEST(type_prop, gelu_default_mode_inference_f32) { - auto param = make_shared(element::f32, Shape{1, 32, 32}); + auto param = make_shared(element::f32, Shape{1, 32, 32}); auto gelu = make_shared(param); ASSERT_EQ(gelu->get_element_type(), element::f32); @@ -29,7 +31,7 @@ TEST(type_prop, gelu_default_mode_inference_f32) { } TEST(type_prop, gelu_default_mode_inference_f16) { - auto param = make_shared(element::f16, Shape{1, 32, 32}); + auto param = make_shared(element::f16, Shape{1, 32, 32}); auto gelu = make_shared(param); ASSERT_EQ(gelu->get_element_type(), element::f16); @@ -38,7 +40,7 @@ TEST(type_prop, gelu_default_mode_inference_f16) { } TEST(type_prop, gelu_tanh_mode_inference_f32) { - auto param = make_shared(element::f32, Shape{1, 32, 32}); + auto param = make_shared(element::f32, Shape{1, 32, 32}); auto gelu = make_shared(param, op::GeluApproximationMode::TANH); ASSERT_EQ(gelu->get_element_type(), element::f32); @@ -47,7 +49,7 @@ TEST(type_prop, gelu_tanh_mode_inference_f32) { } TEST(type_prop, gelu_tanh_mode_inference_f16) { - auto param = make_shared(element::f16, Shape{1, 32, 32}); + auto param = make_shared(element::f16, Shape{1, 32, 32}); auto gelu = make_shared(param, op::GeluApproximationMode::TANH); ASSERT_EQ(gelu->get_element_type(), element::f16); @@ -56,41 +58,41 @@ TEST(type_prop, gelu_tanh_mode_inference_f16) { } TEST(type_prop, gelu_incompatible_input_type_boolean) { - auto param = make_shared(element::boolean, Shape{1, 32, 32}); - ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure); + auto param = make_shared(element::boolean, Shape{1, 32, 32}); + ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure); } TEST(type_prop, gelu_incompatible_input_type_u16) { - auto param = make_shared(element::u16, Shape{1, 32, 32}); - ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure); + auto param = make_shared(element::u16, Shape{1, 32, 32}); + ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure); } TEST(type_prop, gelu_incompatible_input_type_i32) { - auto param = make_shared(element::i32, Shape{1, 32, 32}); - ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure); + auto param = 
make_shared(element::i32, Shape{1, 32, 32}); + ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure); } TEST(type_prop, gelu_incompatible_input_type_i16) { - auto param = make_shared(element::i16, Shape{1, 32, 32}); - ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure); + auto param = make_shared(element::i16, Shape{1, 32, 32}); + ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure); } TEST(type_prop, gelu_dynamic_rank_input_shape_2D) { const PartialShape param_shape{Dimension::dynamic(), 10}; - const auto param = std::make_shared(element::f32, param_shape); + const auto param = std::make_shared(element::f32, param_shape); const auto op = std::make_shared(param); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10})); } TEST(type_prop, gelu_dynamic_rank_input_shape_3D) { const PartialShape param_shape{100, Dimension::dynamic(), 58}; - const auto param = std::make_shared(element::f32, param_shape); + const auto param = std::make_shared(element::f32, param_shape); const auto op = std::make_shared(param); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58})); } TEST(type_prop, gelu_dynamic_rank_input_shape_full) { - const auto param = std::make_shared(element::f32, PartialShape::dynamic()); + const auto param = std::make_shared(element::f32, PartialShape::dynamic()); const auto op = std::make_shared(param); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } diff --git a/src/core/tests/type_prop/generate_proposals.cpp b/src/core/tests/type_prop/generate_proposals.cpp index 3bc8c0ed22b1d0..06ef6114919707 100644 --- a/src/core/tests/type_prop/generate_proposals.cpp +++ b/src/core/tests/type_prop/generate_proposals.cpp @@ -2,13 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/generate_proposals.hpp" + +#include + #include #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -using namespace ngraph; +using namespace ov; using namespace testing; using GenerateProposals = op::v9::GenerateProposals; @@ -37,10 +39,10 @@ TEST(type_prop, generate_proposals_default_ctor) { attrs.post_nms_count = static_cast(s.post_nms_count); attrs.pre_nms_count = 1000; - auto im_info = std::make_shared(element::f32, s.im_info_shape); - auto anchors = std::make_shared(element::f32, s.anchors_shape); - auto deltas = std::make_shared(element::f32, s.deltas_shape); - auto scores = std::make_shared(element::f32, s.scores_shape); + auto im_info = std::make_shared(element::f32, s.im_info_shape); + auto anchors = std::make_shared(element::f32, s.anchors_shape); + auto deltas = std::make_shared(element::f32, s.deltas_shape); + auto scores = std::make_shared(element::f32, s.scores_shape); auto proposals = std::make_shared(); proposals->set_arguments(OutputVector{im_info, anchors, deltas, scores}); @@ -65,10 +67,10 @@ TEST(type_prop, generate_proposals) { const auto dyn_dim = Dimension::dynamic(); - auto im_info = std::make_shared(element::f32, Shape{1, 4}); - auto anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); - auto deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); - auto scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); + auto im_info = std::make_shared(element::f32, Shape{1, 4}); + auto anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); + auto deltas = std::make_shared(element::f32, Shape{1, 12, 
200, 336}); + auto scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); auto proposals = std::make_shared(im_info, anchors, deltas, scores, attrs); @@ -79,10 +81,10 @@ TEST(type_prop, generate_proposals) { EXPECT_EQ(proposals->get_output_partial_shape(1), (PartialShape{{0, 1000}})); EXPECT_EQ(proposals->get_output_partial_shape(2), (PartialShape{1})); - im_info = std::make_shared(element::f32, PartialShape::dynamic(2)); - anchors = std::make_shared(element::f32, PartialShape::dynamic(4)); - deltas = std::make_shared(element::f32, PartialShape::dynamic(4)); - scores = std::make_shared(element::f32, PartialShape::dynamic(4)); + im_info = std::make_shared(element::f32, PartialShape::dynamic(2)); + anchors = std::make_shared(element::f32, PartialShape::dynamic(4)); + deltas = std::make_shared(element::f32, PartialShape::dynamic(4)); + scores = std::make_shared(element::f32, PartialShape::dynamic(4)); proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32); @@ -94,46 +96,46 @@ TEST(type_prop, generate_proposals) { EXPECT_EQ(proposals->get_output_partial_shape(2), (PartialShape{dyn_dim})); // assert throw - im_info = std::make_shared(element::f32, Shape{1, 4}); - anchors = std::make_shared(element::f32, Shape{100, 336, 3, 4}); - deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); - scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); + im_info = std::make_shared(element::f32, Shape{1, 4}); + anchors = std::make_shared(element::f32, Shape{100, 336, 3, 4}); + deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); + scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); ASSERT_THROW(proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32), ngraph::CheckFailure) << "GenerateProposals node was created with invalid data."; - im_info = std::make_shared(element::f32, Shape{1, 4}); - anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); - deltas = std::make_shared(element::f32, Shape{1, 12, 200, 300}); - scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); + im_info = std::make_shared(element::f32, Shape{1, 4}); + anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); + deltas = std::make_shared(element::f32, Shape{1, 12, 200, 300}); + scores = std::make_shared(element::f32, Shape{1, 3, 200, 336}); ASSERT_THROW(proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32), ngraph::CheckFailure) << "GenerateProposals node was created with invalid data."; - im_info = std::make_shared(element::f32, Shape{1, 4}); - anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); - deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); - scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); + im_info = std::make_shared(element::f32, Shape{1, 4}); + anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); + deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); + scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); ASSERT_THROW(proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32), ngraph::CheckFailure) << "GenerateProposals node was created with invalid data."; - im_info = std::make_shared(element::f32, Shape{1, 2}); - anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); - deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); - scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); + im_info = std::make_shared(element::f32, Shape{1, 2}); + 
anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); + deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); + scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); ASSERT_THROW(proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32), ngraph::CheckFailure) << "GenerateProposals node was created with invalid data."; - im_info = std::make_shared(element::f32, Shape{2, 4}); - anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); - deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); - scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); + im_info = std::make_shared(element::f32, Shape{2, 4}); + anchors = std::make_shared(element::f32, Shape{200, 336, 3, 4}); + deltas = std::make_shared(element::f32, Shape{1, 12, 200, 336}); + scores = std::make_shared(element::f32, Shape{1, 4, 200, 336}); ASSERT_THROW(proposals = std::make_shared(im_info, anchors, deltas, scores, attrs, element::i32), ngraph::CheckFailure) @@ -285,10 +287,10 @@ TEST(type_prop, generate_proposals_dynamic) { ? 10 : s.deltas_shape.rank().is_static() ? 30 : s.scores_shape.rank().is_static() ? 40 : ov::no_label; - auto im_info = std::make_shared(element::f32, s.im_info_shape); - auto anchors = std::make_shared(element::f32, s.anchors_shape); - auto deltas = std::make_shared(element::f32, s.deltas_shape); - auto scores = std::make_shared(element::f32, s.scores_shape); + auto im_info = std::make_shared(element::f32, s.im_info_shape); + auto anchors = std::make_shared(element::f32, s.anchors_shape); + auto deltas = std::make_shared(element::f32, s.deltas_shape); + auto scores = std::make_shared(element::f32, s.scores_shape); auto proposals = std::make_shared(im_info, anchors, deltas, scores, attrs); diff --git a/src/core/tests/type_prop/grid_sample.cpp b/src/core/tests/type_prop/grid_sample.cpp index 5502fabeaf6261..2afbf061ccf7f4 100644 --- a/src/core/tests/type_prop/grid_sample.cpp +++ b/src/core/tests/type_prop/grid_sample.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/util/attr_types.hpp" #include "openvino/opsets/opset9.hpp" diff --git a/src/core/tests/type_prop/grn.cpp b/src/core/tests/type_prop/grn.cpp index 900644a53d657c..098834e8d62d28 100644 --- a/src/core/tests/type_prop/grn.cpp +++ b/src/core/tests/type_prop/grn.cpp @@ -2,17 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/grn.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, grn) { float bias = 1.25f; Shape data_shape{2, 3, 4, 5}; - auto A = make_shared(element::f32, data_shape); + auto A = make_shared(element::f32, data_shape); auto grn = make_shared(A, bias); EXPECT_EQ(grn->get_element_type(), element::f32); @@ -22,7 +24,7 @@ TEST(type_prop, grn) { TEST(type_prop, grn_dynamic) { float bias = 1.25f; PartialShape data_shape{2, Dimension::dynamic(), 3, Dimension(4, 6)}; - auto A = make_shared(element::f32, data_shape); + auto A = make_shared(element::f32, data_shape); auto grn = make_shared(A, bias); EXPECT_EQ(grn->get_element_type(), element::f32); @@ -31,7 +33,7 @@ TEST(type_prop, grn_dynamic) { TEST(type_prop, grn_invalid_data_rank) { float bias = 1.25f; - auto A = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::f32, 
Shape{4}); try { auto grn = make_shared(A, bias); @@ -43,7 +45,7 @@ TEST(type_prop, grn_invalid_data_rank) { FAIL() << "Deduced type check failed for unexpected reason"; } - A = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); + A = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); try { auto grn = make_shared(A, bias); diff --git a/src/core/tests/type_prop/group_convolution.cpp b/src/core/tests/type_prop/group_convolution.cpp index 5eea519d099caa..9fc74eb4388219 100644 --- a/src/core/tests/type_prop/group_convolution.cpp +++ b/src/core/tests/type_prop/group_convolution.cpp @@ -2,13 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/group_conv.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, group_convolution_auto_padding_same_lower) { @@ -23,8 +25,8 @@ TEST(type_prop, group_convolution_auto_padding_same_lower) { Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -46,8 +48,8 @@ TEST(type_prop, group_convolution_auto_padding_same_upper) { Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto conv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -63,8 +65,8 @@ TEST(type_prop, group_convolution_auto_padding_same_lower_spatial_dims_static) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -86,8 +88,8 @@ TEST(type_prop, group_convolution_auto_padding_same_upper_spatial_dims_static) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -112,8 +114,8 @@ TEST(type_prop, group_convolution_static_ranks_filters_groups_dyn) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -134,8 +136,8 @@ TEST(type_prop, group_convolution_static_ranks_filters_groups_cout_dyn) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, 
data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -155,8 +157,8 @@ TEST(type_prop, group_convolution_static_ranks_data_cin_filters_group_dyn) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -176,8 +178,8 @@ TEST(type_prop, group_convolution_auto_padding_same_spatial_dims_dynamic) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -196,8 +198,8 @@ TEST(type_prop, group_convolution_data_batch_dynamic) { const PartialShape filters_pshape{2, 1, 2, 3, 3}; const element::Type_t et = element::f32; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -219,8 +221,8 @@ TEST(type_prop, group_convolution_filters_dynamic_auto_pad_explicit) { const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f16; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -243,8 +245,8 @@ TEST(type_prop, group_convolution_filters_dynamic_auto_pad_same) { const element::Type_t et = element::f16; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -264,8 +266,8 @@ TEST(type_prop, group_convolution_data_batch_and_filters_dynamic) { const PartialShape dyn_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data_batch = make_shared(et, dyn_pshape); - auto filters = make_shared(et, dyn_pshape); + auto data_batch = make_shared(et, dyn_pshape); + auto filters = make_shared(et, dyn_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -281,8 +283,8 @@ TEST(type_prop, group_convolution_invalid_et_inputs) { const PartialShape filters_pshape{2, 1, 2, 3, 3}; try { - auto data_batch = make_shared(element::f16, data_batch_pshape); - auto filters = make_shared(element::f32, filters_pshape); + auto data_batch = make_shared(element::f16, data_batch_pshape); + auto filters = make_shared(element::f32, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -300,8 +302,8 @@ TEST(type_prop, group_convolution_invalid_et_inputs) { try { const element::Type boolean_et = element::boolean; - auto data_batch = 
make_shared(boolean_et, data_batch_pshape); - auto filters = make_shared(boolean_et, filters_pshape); + auto data_batch = make_shared(boolean_et, data_batch_pshape); + auto filters = make_shared(boolean_et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -324,8 +326,8 @@ TEST(type_prop, group_convolution_invalid_input_ranks) { // data partial shape provided is rank 4 (Conv2D) // filter partial shape provided is rank 6 (Conv3D) try { - auto filters = make_shared(et, PartialShape{2, 8, 2, 3, 3, Dimension::dynamic()}); - auto data = make_shared(et, PartialShape{1, 16, 6, 6}); + auto filters = make_shared(et, PartialShape{2, 8, 2, 3, 3, Dimension::dynamic()}); + auto data = make_shared(et, PartialShape{1, 16, 6, 6}); auto groupConv = make_shared(data, filters, Strides{}, @@ -343,8 +345,8 @@ TEST(type_prop, group_convolution_invalid_input_ranks) { // data partial shape provided is rank 5 (Conv3D) // filter partial shape provided is rank 5 (Conv2D) try { - const auto filters = make_shared(et, PartialShape{2, 8, 2, 3, 3}); - const auto data = make_shared(et, PartialShape{1, Dimension::dynamic(), 16, 6, 6}); + const auto filters = make_shared(et, PartialShape{2, 8, 2, 3, 3}); + const auto data = make_shared(et, PartialShape{1, Dimension::dynamic(), 16, 6, 6}); const auto groupConv = make_shared(data, filters, Strides{}, @@ -367,8 +369,8 @@ TEST(type_prop, group_convolution_invalid_input_channel_dims) { const PartialShape data_batch_pshape{1, 6, 5, 5}; const PartialShape filters_pshape{1, 1, 3, 3, 3}; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); OV_EXPECT_THROW( const auto op = make_shared(data_batch, @@ -386,8 +388,8 @@ TEST(type_prop, group_convolution_invalid_input_channel_dims) { const PartialShape data_batch_pshape{1, 3, 5, 5}; const PartialShape filters_pshape{-1, 1, 2, 3, 3}; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); OV_EXPECT_THROW( const auto op = make_shared(data_batch, @@ -413,8 +415,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0}; CoordinateDiff pads_end{0, 0}; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, PartialShape::dynamic()); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto groupConv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid strides spatial dimensions not detected"; @@ -429,8 +431,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0}; CoordinateDiff pads_end{0, 0}; - auto data_batch = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid strides spatial dimensions not detected"; @@ -447,8 +449,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0}; CoordinateDiff pads_end{0, 0}; - auto data_batch = make_shared(et, data_batch_pshape); - 
auto filters = make_shared(et, PartialShape::dynamic()); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto groupConv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid dilations spatial dimensions not detected"; @@ -463,8 +465,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0}; CoordinateDiff pads_end{0, 0}; - auto data_batch = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid dilations spatial dimensions not detected"; @@ -481,8 +483,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0, 0}; CoordinateDiff pads_end{0, 0}; - auto data_batch = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); OV_EXPECT_THROW( auto op = @@ -497,8 +499,8 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) { CoordinateDiff pads_begin{0, 0}; CoordinateDiff pads_end{0}; - auto data_batch = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); OV_EXPECT_THROW( auto op = @@ -517,8 +519,8 @@ TEST(type_prop, group_convolution_interval_shapes) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::EXPLICIT; - auto data_batch = make_shared(et, data_batch_pshape); - auto filters = make_shared(et, filters_pshape); + auto data_batch = make_shared(et, data_batch_pshape); + auto filters = make_shared(et, filters_pshape); auto groupConv = make_shared(data_batch, filters, Strides{}, @@ -534,8 +536,8 @@ TEST(type_prop, group_convolution_interval_shapes) { } TEST(type_prop, group_convolution_default_constructed) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto filters = make_shared(element::f32, PartialShape{1, 1, 1, 3, 3}); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto filters = make_shared(element::f32, PartialShape{1, 1, 1, 3, 3}); const auto op = make_shared(); op->set_arguments(OutputVector{data, filters}); diff --git a/src/core/tests/type_prop/group_convolution_backprop_data.cpp b/src/core/tests/type_prop/group_convolution_backprop_data.cpp index 9f04906ca03aa0..57a556f2697f2f 100644 --- a/src/core/tests/type_prop/group_convolution_backprop_data.cpp +++ b/src/core/tests/type_prop/group_convolution_backprop_data.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" #include "convolution_shape_inference.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, group_convolution_backprop_data_shape_infer) { @@ -16,8 +16,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer) { const PartialShape filters_pshape{2, 8, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto 
filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -40,9 +40,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_as const PartialShape filters_pshape{1, 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -65,9 +65,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_as const PartialShape filters_pshape{1, 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = make_shared(element::i64, Shape{2}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = make_shared(element::i64, Shape{2}); auto gcbd = make_shared(data, filters, output_shape, @@ -85,9 +85,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st const PartialShape filters_pshape{1, 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -103,9 +103,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st const PartialShape filters_pshape{Dimension::dynamic(), 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -125,9 +125,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -143,9 +143,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st const PartialShape filters_pshape{Dimension::dynamic(), 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = 
make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -163,9 +163,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st set_shape_labels(filters_pshape, 20); const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -187,8 +187,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_data_nc const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -209,8 +209,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -235,8 +235,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -257,8 +257,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_data_ci const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -281,8 +281,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -304,8 +304,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_data_sp const CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -328,8 +328,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters const 
CoordinateDiff padding_begin{1, 1}; const CoordinateDiff padding_end{1, 1}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -347,9 +347,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_da const PartialShape filters_pshape{1, 16, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -365,8 +365,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_data_dyn) { const PartialShape filters_pshape{4, 5, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -383,9 +383,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_fi const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -401,8 +401,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_filters_dyn) { const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -419,9 +419,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_as const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{3}, {3, 3, 3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {3, 3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -437,9 +437,9 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_as const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); - auto output_shape = make_shared(element::i64, Shape{3}); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); + auto output_shape = make_shared(element::i64, Shape{3}); auto 
gcbd = make_shared(data, filters, output_shape, @@ -455,8 +455,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_data_and_filters_dyn const PartialShape filters_pshape{PartialShape::dynamic()}; const element::Type_t et = element::f32; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -475,8 +475,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_et_inputs) { const element::Type_t data_et = element::f16; const element::Type_t filters_et = element::f32; - auto data = make_shared(data_et, data_pshape); - auto filters = make_shared(filters_et, filters_pshape); + auto data = make_shared(data_et, data_pshape); + auto filters = make_shared(filters_et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -497,8 +497,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_et_inputs) { const element::Type boolean_et = element::boolean; - auto data = make_shared(boolean_et, data_pshape); - auto filters = make_shared(boolean_et, filters_pshape); + auto data = make_shared(boolean_et, data_pshape); + auto filters = make_shared(boolean_et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -520,9 +520,9 @@ TEST(type_prop, group_convolution_backprop_data_invalid_et_inputs) { const element::Type_t inputs_et = element::f32; - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); - auto output_shape = op::Constant::create(inputs_et, Shape{2}, {3, 3}); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(inputs_et, Shape{2}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -547,8 +547,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_ranks) { const element::Type_t inputs_et = element::f32; - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -571,8 +571,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_ranks) { const element::Type_t inputs_et = element::f32; - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); auto gcbd = make_shared(data, filters, Strides{}, @@ -593,9 +593,9 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_ranks) { const element::Type_t inputs_et = element::f32; - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); - auto output_shape = op::Constant::create(element::i64, Shape{2, 1}, {3, 3}); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{2, 1}, {3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -621,8 +621,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_channel_dims) { const PartialShape data_pshape{1, 16, 5, 5}; // [N, C_IN * GROUPS, H, W] const PartialShape filters_pshape{21, 16, 20, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] - auto 
data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); auto gcbd = make_shared(data, filters, strides, padding, padding, dilations); // data batch shape does not have correct dimension C_IN * GROUPS @@ -640,8 +640,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_channel_dims) { const PartialShape data_pshape{1, 16, 5, 5}; // [N, C_IN * GROUPS, H, W] const PartialShape filters_pshape{4, 16, 20, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW] - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_pshape); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_pshape); auto gcbd = make_shared(data, filters, strides, padding, padding, dilations); // filter shape specifies GROUPS = 4 and C_IN = 16, while data batch shape specifies @@ -663,9 +663,9 @@ TEST(type_prop, group_convolution_backprop_data_invalid_output_shape_spatial_dim const element::Type_t inputs_et = element::f32; try { - auto data = make_shared(inputs_et, data_pshape); - auto filters = make_shared(inputs_et, filters_shape); - auto output_shape = op::Constant::create(element::i64, Shape{3}, {3, 3, 3}); + auto data = make_shared(inputs_et, data_pshape); + auto filters = make_shared(inputs_et, filters_shape); + auto output_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {3, 3, 3}); auto gcbd = make_shared(data, filters, output_shape, @@ -694,8 +694,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0}; const CoordinateDiff pads_end{0, 0}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, PartialShape::dynamic()); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid strides spatial dimensions not detected"; @@ -710,8 +710,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0}; const CoordinateDiff pads_end{0, 0}; - auto data = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid strides spatial dimensions not detected"; @@ -728,8 +728,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0}; const CoordinateDiff pads_end{0, 0}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, PartialShape::dynamic()); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid dilations spatial dimensions not detected"; @@ -744,8 +744,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0}; const CoordinateDiff pads_end{0, 0}; - auto data = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); 
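// Editor's note (illustrative sketch, not part of the patch): the construction pattern these
// GroupConvolutionBackpropData hunks migrate to, spelled out with the new OpenVINO op classes.
// The class versions (ov::op::v0::Parameter, ov::op::v1::GroupConvolutionBackpropData) and the
// header paths are assumptions inferred from the direction of this patch, not taken from the diff.
#include <memory>
#include "openvino/op/group_conv.hpp"
#include "openvino/op/parameter.hpp"

int main() {
    using namespace ov;
    // [N, C_IN * GROUPS, H, W] data and [GROUPS, C_IN, C_OUT, kH, kW] filters, as in the tests above.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{1, 16, 6, 6});
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{1, 16, 2, 3, 3});
    const auto gcbd = std::make_shared<op::v1::GroupConvolutionBackpropData>(data,
                                                                             filters,
                                                                             Strides{1, 1},         // strides
                                                                             CoordinateDiff{0, 0},  // pads_begin
                                                                             CoordinateDiff{0, 0},  // pads_end
                                                                             Strides{1, 1});        // dilations
    // Shape inference runs during construction; mismatched spatial or channel dimensions throw
    // ov::NodeValidationFailure, which is exactly what the negative tests in this file rely on.
    return gcbd->get_output_partial_shape(0).rank().get_length() == 4 ? 0 : 1;
}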
auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid dilations spatial dimensions not detected"; @@ -762,8 +762,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0, 0}; const CoordinateDiff pads_end{0, 0}; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, PartialShape::dynamic()); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid padding spatial dimensions not detected"; @@ -778,8 +778,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff pads_begin{0, 0}; const CoordinateDiff pads_end{0}; - auto data = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, pads_begin, pads_end, dilations); FAIL() << "Invalid padding spatial dimensions not detected"; @@ -798,8 +798,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff output_padding{0, 0, 0}; const op::PadType auto_pad = op::PadType::EXPLICIT; - auto data = make_shared(et, data_pshape); - auto filters = make_shared(et, PartialShape::dynamic()); + auto data = make_shared(et, data_pshape); + auto filters = make_shared(et, PartialShape::dynamic()); auto gcbd = make_shared(data, filters, strides, @@ -822,8 +822,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) const CoordinateDiff output_padding{0}; const op::PadType auto_pad = op::PadType::EXPLICIT; - auto data = make_shared(et, PartialShape::dynamic()); - auto filters = make_shared(et, filters_pshape); + auto data = make_shared(et, PartialShape::dynamic()); + auto filters = make_shared(et, filters_pshape); auto gcbd = make_shared(data, filters, strides, @@ -841,9 +841,9 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims) } TEST(type_prop, group_convolution_backprop_data_default_constructed) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto filters = make_shared(element::f32, PartialShape{1, 1, 1, 3, 3, 3}); - const auto out_spatial = op::Constant::create(element::i32, Shape{3}, {5, 4, 10}); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto filters = make_shared(element::f32, PartialShape{1, 1, 1, 3, 3, 3}); + const auto out_spatial = ov::op::v0::Constant::create(element::i32, Shape{3}, {5, 4, 10}); const auto op = make_shared(); op->set_arguments(OutputVector{data, filters, out_spatial}); @@ -874,9 +874,10 @@ TEST(type_prop, group_convolution_backprop_data_interval_shapes) { const element::Type_t et = element::f32; const auto auto_pad = op::PadType::SAME_LOWER; - const auto data_batch = make_shared(et, data_batch_pshape); - const auto filters = make_shared(et, filters_pshape); - const auto out_spatial_shape_of = make_shared(make_shared(et, out_spatial_pshape)); + const auto data_batch = make_shared(et, data_batch_pshape); + const auto filters = make_shared(et, filters_pshape); + const auto out_spatial_shape_of = + make_shared(make_shared(et, out_spatial_pshape)); const auto op = make_shared(data_batch, filters, out_spatial_shape_of, diff --git 
a/src/core/tests/type_prop/group_normalization.cpp b/src/core/tests/type_prop/group_normalization.cpp index 510da87254a822..84ec5f98319a33 100644 --- a/src/core/tests/type_prop/group_normalization.cpp +++ b/src/core/tests/type_prop/group_normalization.cpp @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/openvino.hpp" #include "openvino/opsets/opset12.hpp" diff --git a/src/core/tests/type_prop/gru_cell.cpp b/src/core/tests/type_prop/gru_cell.cpp index c4926cd2679d41..7d5ea237ec4f14 100644 --- a/src/core/tests/type_prop/gru_cell.cpp +++ b/src/core/tests/type_prop/gru_cell.cpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gru_cell.hpp" + +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset4.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, gru_cell) { @@ -18,12 +19,12 @@ TEST(type_prop, gru_cell) { const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); + const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); EXPECT_EQ(gru_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -34,13 +35,13 @@ TEST(type_prop, gru_cell_with_bias) { const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); + const auto X = make_shared(element::f32, Shape{batch_size, input_size}); + const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto gru_cell = make_shared(X, H_t, W, R, B, hidden_size); + const auto gru_cell = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); EXPECT_EQ(gru_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -51,23 +52,23 @@ TEST(type_prop, gru_cell_with_bias_linear_before) { const size_t hidden_size = 3; const size_t gates_count = 3; - 
const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); - - const auto gru_cell = make_shared(X, - H_t, - W, - R, - B, - hidden_size, - std::vector{"sigmoid", "tanh"}, - std::vector{}, - std::vector{}, - 0.f, - true); + const auto X = make_shared(element::f32, Shape{batch_size, input_size}); + const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + + const auto gru_cell = make_shared(X, + H_t, + W, + R, + B, + hidden_size, + std::vector{"sigmoid", "tanh"}, + std::vector{}, + std::vector{}, + 0.f, + true); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); EXPECT_EQ(gru_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); @@ -79,13 +80,13 @@ TEST(type_prop, gru_cell_default_ctor_linear_before) { const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + const auto X = make_shared(element::f32, Shape{batch_size, input_size}); + const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); - const auto gru_cell = make_shared(); + const auto gru_cell = make_shared(); gru_cell->set_linear_before_reset(true); gru_cell->set_arguments(OutputVector{X, H_t, W, R, B}); gru_cell->validate_and_infer_types(); @@ -100,34 +101,34 @@ TEST(type_prop, gru_cell_invalid_input) { const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), + auto W = make_shared(element::f32, Shape{hidden_size, input_size}); + OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), ov::NodeValidationFailure, HasSubstr("First dimension of W input shape is required to be compatible")); // Invalid R tensor shape. 
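// Editor's note (illustrative sketch, not part of the patch): the validation-failure pattern this
// test uses, written against the new API. ov::op::v3::GRUCell, the test-suite name and the header
// paths are assumptions inferred from the includes introduced above ("openvino/op/gru_cell.hpp",
// "common_test_utils/test_assertions.hpp"); the expected error substring is taken from the diff.
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/op/gru_cell.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_examples, gru_cell_w_first_dim_mismatch) {
    using namespace ov;
    constexpr size_t batch_size = 2, input_size = 3, hidden_size = 3, gates_count = 3;

    const auto X = std::make_shared<op::v0::Parameter>(element::f32, Shape{batch_size, input_size});
    const auto H_t = std::make_shared<op::v0::Parameter>(element::f32, Shape{batch_size, hidden_size});
    const auto R = std::make_shared<op::v0::Parameter>(element::f32, Shape{gates_count * hidden_size, hidden_size});
    // W is missing the gates multiplier on its first dimension, so shape validation must fail.
    const auto W = std::make_shared<op::v0::Parameter>(element::f32, Shape{hidden_size, input_size});

    OV_EXPECT_THROW(auto op = std::make_shared<op::v3::GRUCell>(X, H_t, W, R, hidden_size),
                    NodeValidationFailure,
                    testing::HasSubstr("First dimension of W input shape is required to be compatible"));
}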
- W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - R = make_shared(element::f32, Shape{hidden_size, 1}); - OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), + W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); + R = make_shared(element::f32, Shape{hidden_size, 1}); + OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), ov::NodeValidationFailure, HasSubstr("Dimension `hidden_size` is not matched between inputs")); // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); - OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), + R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); + H_t = make_shared(element::f32, Shape{4, hidden_size}); + OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, hidden_size), ov::NodeValidationFailure, HasSubstr("Dimension `batch_size` is not matched between inputs")); // Invalid B tensor shape. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{hidden_size}); - OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, B, hidden_size), + H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto B = make_shared(element::f32, Shape{hidden_size}); + OV_EXPECT_THROW(auto op = make_shared(X, H_t, W, R, B, hidden_size), ov::NodeValidationFailure, HasSubstr("First dimension of B input shape is required to be compatible")); } @@ -138,12 +139,14 @@ TEST(type_prop, gru_cell_dynamic_batch_size) { const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); + const auto W = + make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); + const auto R = + make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + const auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); + const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -154,12 +157,14 @@ TEST(type_prop, gru_cell_dynamic_hidden_size) { const auto hidden_size = Dimension::dynamic(); const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, PartialShape{hidden_size * gates_count, input_size}); - const auto R = make_shared(element::f32, PartialShape{hidden_size * gates_count, hidden_size}); - const auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); + const auto W = + make_shared(element::f32, PartialShape{hidden_size * gates_count, input_size}); + const auto R = + make_shared(element::f32, PartialShape{hidden_size * gates_count, hidden_size}); + const auto H_t = make_shared(element::f32, 
PartialShape{batch_size, hidden_size}); - const auto gru_cell = make_shared(X, H_t, W, R, 3); + const auto gru_cell = make_shared(X, H_t, W, R, 3); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -169,12 +174,12 @@ TEST(type_prop, gru_cell_dynamic_inputs) { const auto input_size = Dimension::dynamic(); const auto hidden_size = Dimension::dynamic(); - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); + const auto W = make_shared(element::f32, PartialShape{hidden_size, input_size}); + const auto R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + const auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - const auto gru_cell = make_shared(X, H_t, W, R, 2); + const auto gru_cell = make_shared(X, H_t, W, R, 2); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); @@ -186,42 +191,42 @@ TEST(type_prop, gru_cell_invalid_input_rank0) { const size_t hidden_size = 3; const size_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - auto W = make_shared(element::f32, PartialShape{}); - ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), - ngraph::NodeValidationFailure) + auto W = make_shared(element::f32, PartialShape{}); + ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), + ov::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); - ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), - ngraph::NodeValidationFailure) + W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); + X = make_shared(element::f32, PartialShape{}); + ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), + ov::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); - ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), - ngraph::NodeValidationFailure) + X = make_shared(element::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::f32, PartialShape{}); + ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), + ov::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for R tensor. 
- H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); - ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), - ngraph::NodeValidationFailure) + H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::f32, PartialShape{}); + ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, hidden_size), + ov::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); - auto B = make_shared(element::f32, PartialShape{}); - ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, hidden_size), - ngraph::NodeValidationFailure) + R = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); + auto B = make_shared(element::f32, PartialShape{}); + ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, hidden_size), + ov::NodeValidationFailure) << "GRUCell node was created with invalid data."; } @@ -231,41 +236,41 @@ TEST(type_prop, gru_cell_input_dynamic_rank) { int64_t hidden_size = 3; int64_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - auto check_dynamic_gru = [&](const shared_ptr& gru) -> bool { + auto check_dynamic_gru = [&](const shared_ptr& gru) -> bool { return gru->output(0).get_partial_shape() == PartialShape{batch_size, hidden_size} && gru->output(0).get_element_type() == gru->input(0).get_element_type(); }; // Dynamic rank for W tensor. - auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto gru_w = make_shared(X, H_t, W, R, hidden_size); + auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto gru_w = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_w), true); // Dynamic rank for X tensor. - W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto gru_x = make_shared(X, H_t, W, R, hidden_size); + W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size}); + X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto gru_x = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_x), true); // Dynamic rank for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto gru_h = make_shared(X, H_t, W, R, hidden_size); + X = make_shared(element::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto gru_h = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_h), true); // Dynamic rank for R tensor. 
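// Editor's note (illustrative sketch, not part of the patch): what the dynamic-rank checks in this
// test verify, in the new API. A single input of unknown rank must not break shape inference; the
// output keeps the {batch_size, hidden_size} shape recovered from the remaining inputs.
// ov::op::v3::GRUCell is an assumption inferred from "openvino/op/gru_cell.hpp".
#include <iostream>
#include <memory>
#include "openvino/op/gru_cell.hpp"
#include "openvino/op/parameter.hpp"

int main() {
    using namespace ov;
    constexpr int64_t batch_size = 2, input_size = 3, hidden_size = 3;

    const auto X = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{batch_size, input_size});
    const auto H_t = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{batch_size, hidden_size});
    const auto R = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{3 * hidden_size, hidden_size});
    // W has a fully dynamic rank; the other inputs still pin batch_size and hidden_size.
    const auto W = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(Rank::dynamic()));

    const auto gru = std::make_shared<op::v3::GRUCell>(X, H_t, W, R, static_cast<size_t>(hidden_size));
    std::cout << gru->get_output_partial_shape(0) << std::endl;  // prints the inferred static shape
    return gru->get_output_partial_shape(0) == PartialShape({batch_size, hidden_size}) ? 0 : 1;
}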
- H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto gru_r = make_shared(X, H_t, W, R, hidden_size); + H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto gru_r = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_r), true); // Dynamic rank for B tensor. - R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto gru_b = make_shared(X, H_t, W, R, B, hidden_size); + R = make_shared(element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto gru_b = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_b), true); } diff --git a/src/core/tests/type_prop/gru_sequence.cpp b/src/core/tests/type_prop/gru_sequence.cpp index b9f862018aaed6..556c750e9622d9 100644 --- a/src/core/tests/type_prop/gru_sequence.cpp +++ b/src/core/tests/type_prop/gru_sequence.cpp @@ -2,13 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gru_sequence.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/op/parameter.hpp" using namespace std; -using namespace ngraph; +using namespace ov; struct gru_sequence_parameters { Dimension batch_size = 8; @@ -16,10 +18,10 @@ struct gru_sequence_parameters { Dimension seq_length = 6; Dimension input_size = 4; Dimension hidden_size = 128; - ngraph::element::Type et = element::f32; + ov::element::Type et = element::f32; }; -shared_ptr gru_seq_tensor_initialization(const gru_sequence_parameters& param) { +shared_ptr gru_seq_tensor_initialization(const gru_sequence_parameters& param) { auto batch_size = param.batch_size; auto seq_length = param.seq_length; auto input_size = param.input_size; @@ -27,15 +29,15 @@ shared_ptr gru_seq_tensor_initialization(const gru_sequence auto hidden_size = param.hidden_size; auto et = param.et; - const auto X = make_shared(et, PartialShape{batch_size, seq_length, input_size}); + const auto X = make_shared(et, PartialShape{batch_size, seq_length, input_size}); const auto initial_hidden_state = - make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); - const auto W = make_shared(et, PartialShape{num_directions, hidden_size * 3, input_size}); - const auto R = make_shared(et, PartialShape{num_directions, hidden_size * 3, hidden_size}); - const auto B = make_shared(et, PartialShape{num_directions, hidden_size * 3}); + make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); + const auto W = make_shared(et, PartialShape{num_directions, hidden_size * 3, input_size}); + const auto R = make_shared(et, PartialShape{num_directions, hidden_size * 3, hidden_size}); + const auto B = make_shared(et, PartialShape{num_directions, hidden_size * 3}); - const auto gru_sequence = make_shared(); + const auto gru_sequence = make_shared(); gru_sequence->set_argument(0, X); gru_sequence->set_argument(1, initial_hidden_state); @@ -47,8 +49,8 @@ shared_ptr gru_seq_tensor_initialization(const gru_sequence return 
gru_sequence; } -shared_ptr gru_seq_direction_initialization(const gru_sequence_parameters& param, - op::RecurrentSequenceDirection direction) { +shared_ptr gru_seq_direction_initialization(const gru_sequence_parameters& param, + op::RecurrentSequenceDirection direction) { auto batch_size = param.batch_size; auto seq_length = param.seq_length; auto input_size = param.input_size; @@ -57,22 +59,22 @@ shared_ptr gru_seq_direction_initialization(const gru_seque auto hidden_size_value = hidden_size.is_dynamic() ? 0 : hidden_size.get_length(); auto et = param.et; - const auto X = make_shared(et, PartialShape{batch_size, seq_length, input_size}); + const auto X = make_shared(et, PartialShape{batch_size, seq_length, input_size}); const auto initial_hidden_state = - make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); - const auto W = make_shared(et, PartialShape{num_directions, hidden_size * 3, input_size}); - const auto R = make_shared(et, PartialShape{num_directions, hidden_size * 3, hidden_size}); - const auto B = make_shared(et, PartialShape{num_directions, hidden_size * 3}); - - auto gru_sequence = make_shared(X, - initial_hidden_state, - sequence_lengths, - W, - R, - B, - hidden_size_value, - direction); + make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); + const auto W = make_shared(et, PartialShape{num_directions, hidden_size * 3, input_size}); + const auto R = make_shared(et, PartialShape{num_directions, hidden_size * 3, hidden_size}); + const auto B = make_shared(et, PartialShape{num_directions, hidden_size * 3}); + + auto gru_sequence = make_shared(X, + initial_hidden_state, + sequence_lengths, + W, + R, + B, + hidden_size_value, + direction); return gru_sequence; } @@ -84,18 +86,25 @@ TEST(type_prop, gru_sequence_forward) { const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); + make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto W = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, input_size}); + const auto R = + make_shared(element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); + const auto B = make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; - const auto sequence = - make_shared(X, initial_hidden_state, sequence_lengths, W, R, B, hidden_size, direction); + const auto sequence = make_shared(X, + initial_hidden_state, + sequence_lengths, + W, + R, + B, + hidden_size, + direction); EXPECT_EQ(sequence->get_hidden_size(), hidden_size); EXPECT_EQ(sequence->get_direction(), op::RecurrentSequenceDirection::FORWARD); @@ -119,30 +128,31 @@ 
TEST(type_prop, gru_sequence_bidirectional) { const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); + make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto W = make_shared(element::f32, Shape{num_directions, 3 * hidden_size, input_size}); + const auto R = + make_shared(element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); + const auto B = make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); const auto direction = op::RecurrentSequenceDirection::BIDIRECTIONAL; const std::vector activations_alpha = {2.7f, 7.0f, 32.367f}; const std::vector activations_beta = {0.0f, 5.49f, 6.0f}; const std::vector activations = {"tanh", "sigmoid"}; - const auto sequence = make_shared(X, - initial_hidden_state, - sequence_lengths, - W, - R, - B, - hidden_size, - direction, - activations, - activations_alpha, - activations_beta); + const auto sequence = make_shared(X, + initial_hidden_state, + sequence_lengths, + W, + R, + B, + hidden_size, + direction, + activations, + activations_alpha, + activations_beta); EXPECT_EQ(sequence->get_hidden_size(), hidden_size); EXPECT_EQ(sequence->get_direction(), op::RecurrentSequenceDirection::BIDIRECTIONAL); @@ -249,7 +259,7 @@ TEST(type_prop, gru_sequence_invalid_input_dimension) { param.et = element::f32; auto gru_sequence = gru_seq_tensor_initialization(param); - auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); + auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); // Validate invalid rank0 tensor for all inputs: X, initial_hidden_state, W, R, B for (size_t i = 0; i < gru_sequence->get_input_size(); i++) { @@ -291,7 +301,7 @@ TEST(type_prop, gru_sequence_input_dynamic_rank) { param.et = element::f32; auto gru_sequence = gru_seq_tensor_initialization(param); - auto dynamic_tensor = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + auto dynamic_tensor = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); for (size_t i = 0; i < gru_sequence->get_input_size(); i++) { gru_sequence = gru_seq_tensor_initialization(param); @@ -321,21 +331,22 @@ TEST(type_prop, gru_sequence_all_inputs_dynamic_rank) { param.hidden_size = 128; param.et = element::f32; - const auto X = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - const auto initial_hidden_state = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - const auto sequence_lengths = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - const auto W = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - const auto R = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - const auto B = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); - - const auto gru_sequence = make_shared(X, - initial_hidden_state, - sequence_lengths, - W, - R, - 
B, - param.hidden_size.get_length(), - op::RecurrentSequenceDirection::FORWARD); + const auto X = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + const auto initial_hidden_state = + make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + const auto sequence_lengths = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + const auto W = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + const auto R = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + const auto B = make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); + + const auto gru_sequence = make_shared(X, + initial_hidden_state, + sequence_lengths, + W, + R, + B, + param.hidden_size.get_length(), + op::RecurrentSequenceDirection::FORWARD); EXPECT_EQ(gru_sequence->get_output_partial_shape(0), (PartialShape{-1, 1, -1, -1})); EXPECT_EQ(gru_sequence->get_output_partial_shape(1), (PartialShape{-1, 1, -1})); EXPECT_EQ(gru_sequence->get_output_element_type(0), param.et); diff --git a/src/core/tests/type_prop/hard_sigmoid.cpp b/src/core/tests/type_prop/hard_sigmoid.cpp index 40e1bb0d12e6c8..3de9a5aa00cbf0 100644 --- a/src/core/tests/type_prop/hard_sigmoid.cpp +++ b/src/core/tests/type_prop/hard_sigmoid.cpp @@ -2,20 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/hard_sigmoid.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, hardsigmoid) { const Shape data_shape{3, 5}; - const auto P = make_shared(element::f32, data_shape); - const auto alpha = op::Constant::create(P->get_element_type(), Shape{}, {0.1f}); - const auto beta = op::Constant::create(P->get_element_type(), Shape{}, {1.2f}); - const auto H = make_shared(P, alpha, beta); + const auto P = make_shared(element::f32, data_shape); + const auto alpha = op::v0::Constant::create(P->get_element_type(), Shape{}, {0.1f}); + const auto beta = op::v0::Constant::create(P->get_element_type(), Shape{}, {1.2f}); + const auto H = make_shared(P, alpha, beta); ASSERT_EQ(H->get_element_type(), element::f32); ASSERT_EQ(H->get_shape(), data_shape); } diff --git a/src/core/tests/type_prop/hsigmoid.cpp b/src/core/tests/type_prop/hsigmoid.cpp index 828efad688706b..979ec92cff60f0 100644 --- a/src/core/tests/type_prop/hsigmoid.cpp +++ b/src/core/tests/type_prop/hsigmoid.cpp @@ -2,34 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/hsigmoid.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, hsigmoid) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto hsigmoid_func = make_shared(data); EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); EXPECT_EQ(hsigmoid_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hsigmoid_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); ASSERT_TRUE(hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hsigmoid_partial = - 
make_shared(make_shared(element::f32, PartialShape::dynamic())); + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(hsigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hsigmoid_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); ASSERT_TRUE(hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); diff --git a/src/core/tests/type_prop/hswish.cpp b/src/core/tests/type_prop/hswish.cpp index e29c865286b15f..bca5d101a9096d 100644 --- a/src/core/tests/type_prop/hswish.cpp +++ b/src/core/tests/type_prop/hswish.cpp @@ -2,34 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/hswish.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, hswish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto hswish_func = make_shared(data); EXPECT_EQ(hswish_func->get_element_type(), element::f32); EXPECT_EQ(hswish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hswish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); EXPECT_EQ(hswish_func->get_element_type(), element::f32); ASSERT_TRUE(hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hswish_partial = - make_shared(make_shared(element::f32, PartialShape::dynamic())); + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(hswish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hswish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); EXPECT_EQ(hswish_func->get_element_type(), element::f32); ASSERT_TRUE(hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); diff --git a/src/core/tests/type_prop/idft.cpp b/src/core/tests/type_prop/idft.cpp index 95d641f206b0b4..486ce3052b3e5a 100644 --- a/src/core/tests/type_prop/idft.cpp +++ b/src/core/tests/type_prop/idft.cpp @@ -2,11 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/idft.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" -using namespace ngraph; +using namespace ov; struct ConstantAxesAndConstantSignalSizeTestParams { PartialShape input_shape; @@ -22,15 +26,15 @@ struct ConstantAxesAndConstantSignalSizeTest : ::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); std::shared_ptr idft; if 
(params.signal_size.empty()) { idft = std::make_shared(data, axes_input); } else { auto signal_size_input = - op::Constant::create(element::i64, params.signal_size_shape, params.signal_size); + ov::op::v0::Constant::create(element::i64, params.signal_size_shape, params.signal_size); idft = std::make_shared(data, axes_input, signal_size_input); } @@ -175,8 +179,8 @@ TEST(type_prop, idft_dynamic_axes) { const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension(1, 18)}; - auto data = std::make_shared(element::f32, input_shape); - auto axes_input = std::make_shared(element::i64, axes_shape); + auto data = std::make_shared(element::f32, input_shape); + auto axes_input = std::make_shared(element::i64, axes_shape); auto idft = std::make_shared(data, axes_input); EXPECT_EQ(idft->get_element_type(), element::f32); @@ -194,8 +198,8 @@ struct NonConstantAxesTest : ::testing::TestWithParam TEST_P(NonConstantAxesTest, idft_non_constant_axes) { auto params = GetParam(); - auto data = std::make_shared(element::f32, params.input_shape); - auto axes_input = std::make_shared(element::i64, params.axes_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = std::make_shared(element::i64, params.axes_shape); auto idft = std::make_shared(data, axes_input); EXPECT_EQ(idft->get_element_type(), element::f32); @@ -267,9 +271,9 @@ struct NonConstantSignalSizeTest : ::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); - auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); + auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); auto idft = std::make_shared(data, axes_input, signal_size_input); EXPECT_EQ(idft->get_element_type(), element::f32); @@ -298,10 +302,10 @@ INSTANTIATE_TEST_SUITE_P( PrintToDummyParamName()); TEST(type_prop, idft_invalid_input) { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); try { - auto data = std::make_shared(element::f32, Shape{2}); + auto data = std::make_shared(element::f32, Shape{2}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -309,7 +313,7 @@ TEST(type_prop, idft_invalid_input) { } try { - auto data = std::make_shared(element::f32, Shape{4, 3}); + auto data = std::make_shared(element::f32, Shape{4, 3}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -317,7 +321,7 @@ TEST(type_prop, idft_invalid_input) { } try { - auto data = std::make_shared(element::f32, Shape{4, 2}); + auto data = std::make_shared(element::f32, Shape{4, 2}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -326,10 +330,10 @@ TEST(type_prop, idft_invalid_input) { } TEST(type_prop, idft_invalid_axes) { - auto data = std::make_shared(element::f32, Shape{4, 3, 2}); + auto data = std::make_shared(element::f32, Shape{4, 3, 2}); try { - auto axes = op::Constant::create(element::i64, Shape{1}, {3}); + auto axes = 
ov::op::v0::Constant::create(element::i64, Shape{1}, {3}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -337,7 +341,7 @@ TEST(type_prop, idft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1}, {-3}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {-3}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -345,7 +349,7 @@ TEST(type_prop, idft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, -2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, -2}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -353,7 +357,7 @@ TEST(type_prop, idft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1}, {2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {2}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -361,7 +365,7 @@ TEST(type_prop, idft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1, 2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1}); auto idft = std::make_shared(data, axes); FAIL() << "IDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -370,11 +374,11 @@ TEST(type_prop, idft_invalid_axes) { } TEST(type_prop, idft_invalid_signal_size) { - auto data = std::make_shared(element::f32, Shape{4, 3, 2}); - auto axes = op::Constant::create(element::i64, Shape{1}, {0}); + auto data = std::make_shared(element::f32, Shape{4, 3, 2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {0}); try { - auto signal_size = op::Constant::create(element::i64, Shape{1, 2}, {0, 1}); + auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1}); auto idft = std::make_shared(data, axes, signal_size); FAIL() << "IDFT node was created with invalid signal size."; } catch (const NodeValidationFailure& error) { @@ -382,7 +386,7 @@ TEST(type_prop, idft_invalid_signal_size) { } try { - auto signal_size = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); auto idft = std::make_shared(data, axes, signal_size); FAIL() << "IDFT node was created with invalid signal size."; } catch (const NodeValidationFailure& error) { @@ -396,9 +400,9 @@ TEST(type_prop, idft_dynamic_types) { const auto signal_size_shape = PartialShape::dynamic(); const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 2}; - auto data = std::make_shared(element::dynamic, input_shape); - auto axes_input = std::make_shared(element::dynamic, axes_shape); - auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape); + auto data = std::make_shared(element::dynamic, input_shape); + auto axes_input = std::make_shared(element::dynamic, axes_shape); + auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape); auto idft = std::make_shared(data, axes_input, signal_size_input); EXPECT_EQ(idft->get_element_type(), element::dynamic); diff --git a/src/core/tests/type_prop/if.cpp b/src/core/tests/type_prop/if.cpp 
index 42a91f8b27420f..025081b52a0593 100644 --- a/src/core/tests/type_prop/if.cpp +++ b/src/core/tests/type_prop/if.cpp @@ -2,37 +2,44 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/if.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/reduce_mean.hpp" +#include "openvino/op/result.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, if_simple_test) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); auto convert_then_op = std::make_shared(then_op, element::f32); - auto then_op_res = std::make_shared(convert_then_op); + auto then_op_res = std::make_shared(convert_then_op); auto then_body = make_shared(OutputVector{then_op_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); auto convert_else_op = std::make_shared(else_op, element::f32); - auto else_op_res = std::make_shared(convert_else_op); + auto else_op_res = std::make_shared(convert_else_op); auto else_body = make_shared(OutputVector{else_op_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); if_op->set_then_body(then_body); @@ -40,7 +47,7 @@ TEST(type_prop, if_simple_test) { if_op->set_input(X, Xt, Xe); if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_op_res, else_op_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); Shape out0_shape{32, 40, 10}; auto sh = result0->get_output_shape(0); EXPECT_EQ(sh, out0_shape); @@ -54,23 +61,23 @@ TEST(type_prop, if_simple_test) { TEST(type_prop, if_non_const_condition_test) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto cond = make_shared(element::boolean, Shape{1}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto cond = make_shared(element::boolean, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, 
PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); @@ -79,31 +86,31 @@ TEST(type_prop, if_non_const_condition_test) { if_op->set_input(X, Xt, Xe); if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); Shape out0_shape{32, 40, 10}; auto sh = result0->get_output_shape(0); EXPECT_EQ(sh, out0_shape); } TEST(type_prop, if_clone_test) { - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto cond = make_shared(element::boolean, Shape{1}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto cond = make_shared(element::boolean, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); - auto Xnew = make_shared(element::f32, PartialShape::dynamic()); - auto Ynew = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xnew = make_shared(element::f32, PartialShape::dynamic()); + auto Ynew = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); if_op->set_then_body(then_body); @@ -116,30 +123,29 @@ TEST(type_prop, if_clone_test) { } TEST(type_prop, if_multiple_outputs) { - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto cond = make_shared(element::boolean, Shape{1}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto cond = make_shared(element::boolean, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = 
make_shared(element::f32, PartialShape::dynamic()); - auto Xnew = make_shared(element::f32, PartialShape::dynamic()); - auto Ynew = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xnew = make_shared(element::f32, PartialShape::dynamic()); + auto Ynew = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_body_res_1 = make_shared(then_op); - auto then_body_res_2 = make_shared(Xt); + auto then_body_res_1 = make_shared(then_op); + auto then_body_res_2 = make_shared(Xt); auto then_body = make_shared(OutputVector{then_body_res_1, then_body_res_2}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_const = std::make_shared(ngraph::element::f32, - ngraph::Shape{1, 1, 1}, - std::vector{0.5f}); - auto else_body_res_1 = make_shared(else_op); - auto else_body_res_2 = make_shared(else_const); + auto else_const = + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}, std::vector{0.5f}); + auto else_body_res_1 = make_shared(else_op); + auto else_body_res_2 = make_shared(else_const); auto else_body = make_shared(OutputVector{else_body_res_1, else_body_res_2}, ParameterVector{Xe, Ye}); @@ -150,8 +156,8 @@ TEST(type_prop, if_multiple_outputs) { if_op->set_input(Y, Yt, Ye); auto res1 = if_op->set_output(then_body_res_1, else_body_res_1); auto res2 = if_op->set_output(then_body_res_2, else_body_res_2); - auto result1 = make_shared(res1); - auto result2 = make_shared(res2); + auto result1 = make_shared(res1); + auto result2 = make_shared(res2); Shape out0_shape{32, 40, 10}; auto sh = result1->get_output_shape(0); auto is_dynamic = result2->is_dynamic(); @@ -161,23 +167,23 @@ TEST(type_prop, if_multiple_outputs) { TEST(type_prop, if_scalar_condition) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto cond = make_shared(element::boolean, Shape{}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto cond = make_shared(element::boolean, Shape{}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); @@ -186,7 +192,7 @@ TEST(type_prop, if_scalar_condition) { if_op->set_input(X, Xt, Xe); 
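// Editor's note (illustrative sketch, not part of the patch): the end-to-end If construction these
// tests exercise, spelled out with the new classes. op::v8::If, op::v1::Add, op::v1::Maximum and
// the header paths are assumptions inferred from the includes added at the top of this file.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/if.hpp"
#include "openvino/op/maximum.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

int main() {
    using namespace ov;
    const auto X = std::make_shared<op::v0::Parameter>(element::f32, Shape{32, 40, 10});
    const auto Y = std::make_shared<op::v0::Parameter>(element::f32, Shape{32, 40, 10});
    const auto cond = std::make_shared<op::v0::Parameter>(element::boolean, Shape{1});

    // Each branch is a separate ov::Model with its own parameters and results.
    const auto Xt = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto Yt = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto then_res = std::make_shared<op::v0::Result>(std::make_shared<op::v1::Add>(Xt, Yt));
    const auto then_body = std::make_shared<Model>(OutputVector{then_res}, ParameterVector{Xt, Yt});

    const auto Xe = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto Ye = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto else_res = std::make_shared<op::v0::Result>(std::make_shared<op::v1::Maximum>(Xe, Ye));
    const auto else_body = std::make_shared<Model>(OutputVector{else_res}, ParameterVector{Xe, Ye});

    // Wire the outer inputs to the corresponding body parameters and merge the branch outputs.
    const auto if_op = std::make_shared<op::v8::If>(cond);
    if_op->set_then_body(then_body);
    if_op->set_else_body(else_body);
    if_op->set_input(X, Xt, Xe);
    if_op->set_input(Y, Yt, Ye);
    const auto out = if_op->set_output(then_res, else_res);
    if_op->validate_and_infer_types();

    // Both branches produce {32, 40, 10}, so the merged output shape stays static.
    return out.get_partial_shape() == PartialShape({32, 40, 10}) ? 0 : 1;
}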
if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); Shape out0_shape{32, 40, 10}; auto sh = result0->get_output_shape(0); EXPECT_EQ(sh, out0_shape); @@ -196,21 +202,21 @@ TEST(type_prop, if_dynamic_output) { // That which we iterate over auto X_shape = Shape{1, 20, 5, 30}; auto Y_shape = Shape{18, 16, 14, 12}; - auto X = make_shared(element::f32, X_shape); - auto Y = make_shared(element::f32, Y_shape); - auto cond = make_shared(element::boolean, Shape{1}); + auto X = make_shared(element::f32, X_shape); + auto Y = make_shared(element::f32, Y_shape); + auto cond = make_shared(element::boolean, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Xt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt}); auto else_op = std::make_shared(Ye, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Ye}); auto if_op = make_shared(cond); @@ -219,7 +225,7 @@ TEST(type_prop, if_dynamic_output) { if_op->set_input(X, Xt, nullptr); if_op->set_input(Y, nullptr, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); auto dynamic_shape = result0->get_output_partial_shape(0); EXPECT_EQ(X_shape.size(), dynamic_shape.rank().get_length()); @@ -240,23 +246,23 @@ TEST(type_prop, if_dynamic_inputs) { auto X_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}; auto Y_shape = PartialShape{Dimension::dynamic(), 20, 30}; ; - auto X = make_shared(element::f32, X_shape); - auto Y = make_shared(element::f32, Y_shape); - auto cond = make_shared(element::boolean, Shape{1}); + auto X = make_shared(element::f32, X_shape); + auto Y = make_shared(element::f32, Y_shape); + auto cond = make_shared(element::boolean, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Yt = make_shared(element::f32, PartialShape::dynamic()); - auto Xe = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Yt = make_shared(element::f32, PartialShape::dynamic()); + auto Xe = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); @@ -265,7 +271,7 @@ TEST(type_prop, if_dynamic_inputs) { if_op->set_input(X, Xt, Xe); 
if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); auto dynamic_shape = result0->get_output_partial_shape(0); auto expected_result = PartialShape{Dimension::dynamic(), 20, 30}; EXPECT_EQ(3, dynamic_shape.rank().get_length()); @@ -278,20 +284,20 @@ TEST(type_prop, if_dynamic_inputs) { TEST(type_prop, if_scalar_and_1d_union) { // That which we iterate over - auto X = make_shared(element::f32, Shape{}); - auto Y = make_shared(element::f32, PartialShape::dynamic(1)); - auto cond = make_shared(element::boolean, Shape{}); + auto X = make_shared(element::f32, Shape{}); + auto Y = make_shared(element::f32, PartialShape::dynamic(1)); + auto cond = make_shared(element::boolean, Shape{}); // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Xt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt}); auto else_op = std::make_shared(Ye, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Ye}); auto if_op = make_shared(cond); @@ -300,7 +306,7 @@ TEST(type_prop, if_scalar_and_1d_union) { if_op->set_input(X, Xt, nullptr); if_op->set_input(Y, nullptr, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); PartialShape out_shape{PartialShape::dynamic(1)}; auto sh = result0->get_output_partial_shape(0); EXPECT_EQ(sh, out_shape); @@ -308,20 +314,20 @@ TEST(type_prop, if_scalar_and_1d_union) { TEST(type_prop, if_scalar_and_1d_static_union) { // That which we iterate over - auto X = make_shared(element::f32, Shape{}); - auto Y = make_shared(element::f32, PartialShape{8}); - auto cond = make_shared(element::boolean, Shape{}); + auto X = make_shared(element::f32, Shape{}); + auto Y = make_shared(element::f32, PartialShape{8}); + auto cond = make_shared(element::boolean, Shape{}); // Body parameters - auto Xt = make_shared(element::f32, PartialShape::dynamic()); - auto Ye = make_shared(element::f32, PartialShape::dynamic()); + auto Xt = make_shared(element::f32, PartialShape::dynamic()); + auto Ye = make_shared(element::f32, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Xt); - auto then_body_res = make_shared(then_op); + auto then_body_res = make_shared(then_op); auto then_body = make_shared(OutputVector{then_body_res}, ParameterVector{Xt}); auto else_op = std::make_shared(Ye, Ye); - auto else_body_res = make_shared(else_op); + auto else_body_res = make_shared(else_op); auto else_body = make_shared(OutputVector{else_body_res}, ParameterVector{Ye}); auto if_op = make_shared(cond); @@ -330,7 +336,7 @@ TEST(type_prop, if_scalar_and_1d_static_union) { if_op->set_input(X, Xt, nullptr); if_op->set_input(Y, nullptr, Ye); auto res = if_op->set_output(then_body_res, else_body_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); PartialShape out_shape{PartialShape::dynamic(1)}; auto sh = result0->get_output_partial_shape(0); EXPECT_EQ(sh, out_shape); @@ -338,24 +344,24 @@ TEST(type_prop, 
if_scalar_and_1d_static_union) { TEST(type_prop, if_element_type_dynamic) { // That which we iterate over - auto X = make_shared(element::f16, Shape{32, 40, 10}); - auto Y = make_shared(element::f16, Shape{32, 40, 10}); - auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, false); + auto X = make_shared(element::f16, Shape{32, 40, 10}); + auto Y = make_shared(element::f16, Shape{32, 40, 10}); + auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, false); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::dynamic, PartialShape::dynamic()); - auto Yt = make_shared(element::dynamic, PartialShape::dynamic()); - auto Xe = make_shared(element::dynamic, PartialShape::dynamic()); - auto Ye = make_shared(element::dynamic, PartialShape::dynamic()); + auto Xt = make_shared(element::dynamic, PartialShape::dynamic()); + auto Yt = make_shared(element::dynamic, PartialShape::dynamic()); + auto Xe = make_shared(element::dynamic, PartialShape::dynamic()); + auto Ye = make_shared(element::dynamic, PartialShape::dynamic()); // Body auto then_op = std::make_shared(Xt, Yt); - auto then_op_res = std::make_shared(then_op); + auto then_op_res = std::make_shared(then_op); auto then_body = make_shared(OutputVector{then_op_res}, ParameterVector{Xt, Yt}); auto else_op = std::make_shared(Xe, Ye); - auto else_op_res = std::make_shared(else_op); + auto else_op_res = std::make_shared(else_op); auto else_body = make_shared(OutputVector{else_op_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); if_op->set_then_body(then_body); @@ -363,7 +369,7 @@ TEST(type_prop, if_element_type_dynamic) { if_op->set_input(X, Xt, Xe); if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_op_res, else_op_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); Shape out0_shape{32, 40, 10}; auto sh = result0->get_output_shape(0); EXPECT_EQ(sh, out0_shape); @@ -375,28 +381,28 @@ TEST(type_prop, if_element_type_dynamic) { TEST(type_prop, if_invalid_false_body) { // That which we iterate over - auto X = make_shared(element::f16, Shape{32, 40, 10}); - auto Y = make_shared(element::f16, Shape{32, 40}); - auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, false); + auto X = make_shared(element::f16, Shape{32, 40, 10}); + auto Y = make_shared(element::f16, Shape{32, 40}); + auto cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, false); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xt = make_shared(element::dynamic, PartialShape::dynamic()); - auto Yt = make_shared(element::dynamic, PartialShape::dynamic()); - auto Xe = make_shared(element::dynamic, PartialShape::dynamic()); - auto Ye = make_shared(element::dynamic, PartialShape::dynamic()); + auto Xt = make_shared(element::dynamic, PartialShape::dynamic()); + auto Yt = make_shared(element::dynamic, PartialShape::dynamic()); + auto Xe = make_shared(element::dynamic, PartialShape::dynamic()); + auto Ye = make_shared(element::dynamic, PartialShape::dynamic()); // Body - auto axes_4d = opset5::Constant::create(element::i32, ngraph::Shape{2}, {2, 3}); + auto axes_4d = ov::op::v0::Constant::create(element::i32, ngraph::Shape{2}, {2, 3}); auto then_reduce_op = std::make_shared(Xt, Yt); auto then_op = std::make_shared(then_reduce_op, Yt); - auto then_op_res = std::make_shared(then_op); + auto then_op_res = std::make_shared(then_op); auto then_body = make_shared(OutputVector{then_op_res}, 
ParameterVector{Xt, Yt}); - auto axes_3d = opset5::Constant::create(element::i32, ngraph::Shape{1}, {2}); + auto axes_3d = ov::op::v0::Constant::create(element::i32, ngraph::Shape{1}, {2}); auto else_reduce_op = std::make_shared(Xe, axes_3d); auto else_op = std::make_shared(else_reduce_op, Ye); - auto else_op_res = std::make_shared(else_op); + auto else_op_res = std::make_shared(else_op); auto else_body = make_shared(OutputVector{else_op_res}, ParameterVector{Xe, Ye}); auto if_op = make_shared(cond); if_op->set_then_body(then_body); @@ -404,7 +410,7 @@ TEST(type_prop, if_invalid_false_body) { if_op->set_input(X, Xt, Xe); if_op->set_input(Y, Yt, Ye); auto res = if_op->set_output(then_op_res, else_op_res); - auto result0 = make_shared(res); + auto result0 = make_shared(res); Shape out0_shape{32, 40}; auto sh = result0->get_output_shape(0); EXPECT_EQ(sh, out0_shape); diff --git a/src/core/tests/type_prop/interpolate.cpp b/src/core/tests/type_prop/interpolate.cpp index 209989fe40cbba..174dd935060bea 100644 --- a/src/core/tests/type_prop/interpolate.cpp +++ b/src/core/tests/type_prop/interpolate.cpp @@ -4,12 +4,14 @@ #include "openvino/op/interpolate.hpp" +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" -using namespace ngraph; +using namespace ov; using namespace testing; using InterpolateMode = op::v4::Interpolate::InterpolateMode; @@ -19,8 +21,8 @@ using InterpolateAttrs = op::v4::Interpolate::InterpolateAttrs; using ShapeCalcMode = op::v4::Interpolate::ShapeCalcMode; TEST(type_prop, interpolate_v0_default_ctor) { - auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); - auto target_shape = op::Constant::create(element::i32, Shape{2}, {15, 30}); + auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); + auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{2}, {15, 30}); op::v0::Interpolate::Attributes attrs; attrs.axes = AxisSet{2, 3}; @@ -38,8 +40,8 @@ TEST(type_prop, interpolate_v0_default_ctor) { } TEST(type_prop, interpolate_v0_all_inputs_dynamic_rank) { - const auto image = std::make_shared(element::f16, PartialShape::dynamic()); - const auto target_shape = std::make_shared(element::i32, PartialShape::dynamic()); + const auto image = std::make_shared(element::f16, PartialShape::dynamic()); + const auto target_shape = std::make_shared(element::i32, PartialShape::dynamic()); op::v0::Interpolate::Attributes attrs; attrs.axes = AxisSet{2, 3}; @@ -53,8 +55,8 @@ TEST(type_prop, interpolate_v0_all_inputs_dynamic_rank) { } TEST(type_prop, interpolate_v0_all_inputs_static_rank) { - const auto image = std::make_shared(element::f16, PartialShape::dynamic(6)); - const auto target_shape = std::make_shared(element::i32, PartialShape::dynamic(1)); + const auto image = std::make_shared(element::f16, PartialShape::dynamic(6)); + const auto target_shape = std::make_shared(element::i32, PartialShape::dynamic(1)); op::v0::Interpolate::Attributes attrs; attrs.axes = AxisSet{2, 3}; @@ -68,8 +70,8 @@ TEST(type_prop, interpolate_v0_all_inputs_static_rank) { } TEST(type_prop, interpolate_v0_target_shape_not_constant) { - const auto image = std::make_shared(element::bf16, PartialShape{2, 4, 12, 12}); - const auto target_shape = std::make_shared(element::i64, PartialShape{1}); + const auto image = std::make_shared(element::bf16, PartialShape{2, 4, 12, 12}); + const auto target_shape = 
std::make_shared(element::i64, PartialShape{1}); op::v0::Interpolate::Attributes attrs; attrs.axes = AxisSet{3, 1}; @@ -88,8 +90,9 @@ TEST(type_prop, interpolate_v0_target_shape_as_shape_of) { set_shape_labels(img_shape, 10); set_shape_labels(out_shape, 20); - auto image = std::make_shared(element::f64, img_shape); - auto target_shape = std::make_shared(std::make_shared(element::i32, out_shape)); + auto image = std::make_shared(element::f64, img_shape); + auto target_shape = + std::make_shared(std::make_shared(element::i32, out_shape)); op::v0::Interpolate::Attributes attrs; attrs.axes = AxisSet{3, 1}; @@ -104,10 +107,10 @@ TEST(type_prop, interpolate_v0_target_shape_as_shape_of) { // --- v4 --- TEST(type_prop, interpolate_v4_default_ctor) { - auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); - auto target_shape = std::make_shared(element::i32, Shape{}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); + auto target_shape = std::make_shared(element::i32, Shape{}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -130,10 +133,10 @@ TEST(type_prop, interpolate_v4_default_ctor) { } TEST(type_prop, interpolate_v4) { - auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); - auto target_shape = std::make_shared(element::i32, Shape{15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); + auto target_shape = std::make_shared(element::i32, Shape{15, 30}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -155,10 +158,10 @@ TEST(type_prop, interpolate_v4_non_constant_axes_scales) { auto img_shape = PartialShape{2, 2, 30, 60}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::f16, img_shape); - auto target_shape = std::make_shared(element::i64, Shape{}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = std::make_shared(element::i32, PartialShape{2}); + auto image = std::make_shared(element::f16, img_shape); + auto target_shape = std::make_shared(element::i64, Shape{}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = std::make_shared(element::i32, PartialShape{2}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -180,11 +183,11 @@ TEST(type_prop, interpolate_v4_non_constant_axes_sizes) { auto img_shape = PartialShape{2, 2, 30, 60}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::bf16, img_shape); - auto target_shape = std::make_shared(element::i64, Shape{2}); - auto scales = op::Constant::create(element::f32, Shape{2, 1}, {0.5f, 0.5f}); + auto image = std::make_shared(element::bf16, img_shape); + auto target_shape = std::make_shared(element::i64, Shape{2}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2, 1}, {0.5f, 0.5f}); - auto axes = std::make_shared(element::i32, PartialShape{2}); + auto axes = std::make_shared(element::i32, 
PartialShape{2}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -203,10 +206,10 @@ TEST(type_prop, interpolate_v4_non_constant_axes_sizes) { } TEST(type_prop, interpolate_v4_img_dynamic_rank) { - auto image = std::make_shared(element::bf16, PartialShape::dynamic()); - auto target_shape = std::make_shared(element::i32, Shape{2}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::bf16, PartialShape::dynamic()); + auto target_shape = std::make_shared(element::i32, Shape{2}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -227,10 +230,10 @@ TEST(type_prop, interpolate_v4_partial_static_rank) { auto img_shape = PartialShape{2, 2, -1, {5, 30}}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::f32, img_shape); - auto target_shape = std::make_shared(element::i32, Shape{2}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::f32, img_shape); + auto target_shape = std::make_shared(element::i32, Shape{2}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -253,10 +256,10 @@ TEST(type_prop, interpolate_v4_img_intervals_use_scales) { auto img_shape = PartialShape{{1, 2}, -1, 10, {5, 30}}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::f32, img_shape); - auto target_shape = std::make_shared(element::i32, Shape{2}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::f32, img_shape); + auto target_shape = std::make_shared(element::i32, Shape{2}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -280,10 +283,11 @@ TEST(type_prop, interpolate_v4_use_sizes_as_shape_of) { set_shape_labels(img_shape, 10); set_shape_labels(out_shape, 20); - auto image = std::make_shared(element::f32, img_shape); - auto target_shape = std::make_shared(std::make_shared(element::i32, out_shape)); - auto scales = op::Constant::create(element::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {3, 1}); + auto image = std::make_shared(element::f32, img_shape); + auto target_shape = + std::make_shared(std::make_shared(element::i32, out_shape)); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {3, 1}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -305,10 +309,10 @@ TEST(type_prop, interpolate_v4_use_scales_interval_shapes) { auto img_shape = PartialShape{2, 2, {12, 800}, {0, -1}, {24, -1}}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::f32, img_shape); - auto target_shape = std::make_shared(element::i32, Shape{3}); - auto scales = 
op::Constant::create(element::f32, Shape{3}, {0.5f, 0.25f, 0.125f}); - auto axes = op::Constant::create(element::i64, Shape{3}, {2, 3, 4}); + auto image = std::make_shared(element::f32, img_shape); + auto target_shape = std::make_shared(element::i32, Shape{3}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{3}, {0.5f, 0.25f, 0.125f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 4}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::NEAREST; @@ -328,10 +332,10 @@ TEST(type_prop, interpolate_v4_use_scales_interval_shapes) { } TEST(type_prop, interpolate_v4_target_shapes_gt_axes_number) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto target_shape = op::Constant::create(element::i32, Shape{3}, {10, 12, 20}); - const auto scales = op::Constant::create(element::f32, Shape{1}, {0.3f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {0, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{3}, {10, 12, 20}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.3f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -344,10 +348,10 @@ TEST(type_prop, interpolate_v4_target_shapes_gt_axes_number) { } TEST(type_prop, interpolate_v4_scales_gt_axes_number) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto target_shape = std::make_shared(element::i32, Shape{3}); - const auto scales = op::Constant::create(element::f32, Shape{3}, {0.2f, 0.2f, 0.3f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto target_shape = std::make_shared(element::i32, Shape{3}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{3}, {0.2f, 0.2f, 0.3f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -360,10 +364,10 @@ TEST(type_prop, interpolate_v4_scales_gt_axes_number) { } TEST(type_prop, interpolate_v4_incorrect_mode) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto target_shape = std::make_shared(element::i32, Shape{2}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto target_shape = std::make_shared(element::i32, Shape{2}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -382,9 +386,9 @@ TEST(type_prop, interpolate_v4_incorrect_mode) { } TEST(type_prop, interpolate_v4_target_shape_not_1d) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); - const auto axes = 
op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -394,28 +398,28 @@ TEST(type_prop, interpolate_v4_target_shape_not_1d) { OV_EXPECT_THROW(std::ignore = std::make_shared( image, - std::make_shared(element::i32, Shape{1, 2}), + std::make_shared(element::i32, Shape{1, 2}), scales, axes, attrs), ov::NodeValidationFailure, HasSubstr("Input [1] is not rank 1")); - OV_EXPECT_THROW( - std::ignore = std::make_shared(image, - std::make_shared(element::i32, Shape{}), - scales, - axes, - attrs), - ov::NodeValidationFailure, - HasSubstr("Input [1] is not rank 1")); + OV_EXPECT_THROW(std::ignore = std::make_shared( + image, + std::make_shared(element::i32, Shape{}), + scales, + axes, + attrs), + ov::NodeValidationFailure, + HasSubstr("Input [1] is not rank 1")); } TEST(type_prop, interpolate_v4_scales_not_1d) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto target_shape = op::Constant::create(element::i32, Shape{2}, {10, 20}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{2}, {10, 20}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -426,27 +430,27 @@ TEST(type_prop, interpolate_v4_scales_not_1d) { OV_EXPECT_THROW(std::ignore = std::make_shared( image, target_shape, - std::make_shared(element::f32, Shape{1, 2}), + std::make_shared(element::f32, Shape{1, 2}), axes, attrs), ov::NodeValidationFailure, HasSubstr("Input [2] is not rank 1")); - OV_EXPECT_THROW( - std::ignore = std::make_shared(image, - target_shape, - std::make_shared(element::f32, Shape{}), - axes, - attrs), - ov::NodeValidationFailure, - HasSubstr("Input [2] is not rank 1")); + OV_EXPECT_THROW(std::ignore = std::make_shared( + image, + target_shape, + std::make_shared(element::f32, Shape{}), + axes, + attrs), + ov::NodeValidationFailure, + HasSubstr("Input [2] is not rank 1")); } TEST(type_prop, interpolate_v4_axes_not_1d) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto target_shape = op::Constant::create(element::i32, Shape{2}, {10, 20}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{2}, {10, 20}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; 
@@ -458,7 +462,7 @@ TEST(type_prop, interpolate_v4_axes_not_1d) { image, target_shape, scales, - std::make_shared(element::i32, Shape{1, 2}), + std::make_shared(element::i32, Shape{1, 2}), attrs), ov::NodeValidationFailure, HasSubstr("Input [3] is not rank 1")); @@ -467,7 +471,7 @@ TEST(type_prop, interpolate_v4_axes_not_1d) { image, target_shape, scales, - std::make_shared(element::i32, Shape{1, 2}), + std::make_shared(element::i32, Shape{1, 2}), attrs), ov::NodeValidationFailure, HasSubstr("Input [3] is not rank 1")); @@ -475,9 +479,9 @@ TEST(type_prop, interpolate_v4_axes_not_1d) { // --- v11 --- TEST(type_prop, interpolate_v11_default_ctor) { - auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -495,9 +499,9 @@ TEST(type_prop, interpolate_v11_default_ctor) { } TEST(type_prop, interpolate_v11_scales) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -510,9 +514,9 @@ TEST(type_prop, interpolate_v11_scales) { } TEST(type_prop, interpolate_v11_scales_all_inputs_static_rank) { - const auto image = std::make_shared(element::f16, PartialShape::dynamic(8)); - const auto scales = std::make_shared(element::f32, PartialShape::dynamic(1)); - const auto axes = std::make_shared(element::i64, PartialShape::dynamic(1)); + const auto image = std::make_shared(element::f16, PartialShape::dynamic(8)); + const auto scales = std::make_shared(element::f32, PartialShape::dynamic(1)); + const auto axes = std::make_shared(element::i64, PartialShape::dynamic(1)); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -525,9 +529,9 @@ TEST(type_prop, interpolate_v11_scales_all_inputs_static_rank) { } TEST(type_prop, interpolate_v11_sizes) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto sizes = op::Constant::create(element::i32, Shape{2}, {6, 12}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto sizes = ov::op::v0::Constant::create(element::i32, Shape{2}, {6, 12}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -540,9 +544,9 @@ TEST(type_prop, interpolate_v11_sizes) { } TEST(type_prop, 
interpolate_v11_sizes_all_inputs_dynamic_rank) { - const auto image = std::make_shared(element::f32, PartialShape::dynamic()); - const auto sizes = std::make_shared(element::i32, PartialShape::dynamic()); - const auto axes = std::make_shared(element::i64, PartialShape::dynamic()); + const auto image = std::make_shared(element::f32, PartialShape::dynamic()); + const auto sizes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i64, PartialShape::dynamic()); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -558,9 +562,9 @@ TEST(type_prop, interpolate_v11_intervals_with_scales_mode) { auto img_shape = PartialShape{{1, 3}, 3, {1, 10}, {10, -1}, {10, 20}}; set_shape_labels(img_shape, 10); - const auto image = std::make_shared(element::f32, img_shape); - const auto scales = op::Constant::create(element::f32, Shape{3}, {2.0f, 3.0f, 1.0f}); - const auto axes = op::Constant::create(element::i64, Shape{3}, {2, 3, 4}); + const auto image = std::make_shared(element::f32, img_shape); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{3}, {2.0f, 3.0f, 1.0f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 4}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -577,9 +581,9 @@ TEST(type_prop, interpolate_v11_intervals_with_sizes_mode) { auto img_shape = PartialShape{{1, 3}, 3, {1, 10}, {10, -1}}; set_shape_labels(img_shape, 10); - const auto image = std::make_shared(element::f32, img_shape); - const auto sizes = op::Constant::create(element::i32, Shape{2}, {200, 300}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, img_shape); + const auto sizes = ov::op::v0::Constant::create(element::i32, Shape{2}, {200, 300}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -597,10 +601,10 @@ TEST(type_prop, interpolate_v11_sizes_with_shapeof) { set_shape_labels(img_shape, 10); set_shape_labels(sizes_shape, 20); - const auto image = std::make_shared(element::f32, img_shape); - const auto param = std::make_shared(element::f32, sizes_shape); - const auto sizes = std::make_shared(param); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 1}); + const auto image = std::make_shared(element::f32, img_shape); + const auto param = std::make_shared(element::f32, sizes_shape); + const auto sizes = std::make_shared(param); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 1}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -616,9 +620,9 @@ TEST(type_prop, interpolate_v11_non_constant_axes_scales) { auto img_shape = PartialShape{2, 2, 30, 60}; set_shape_labels(img_shape, 10); - auto image = std::make_shared(element::f16, img_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = std::make_shared(element::i32, PartialShape{2}); + auto image = std::make_shared(element::f16, img_shape); + auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = std::make_shared(element::i32, 
PartialShape{2}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -632,9 +636,9 @@ TEST(type_prop, interpolate_v11_non_constant_axes_scales) { } TEST(type_prop, interpolate_v11_scales_incorrect_et) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto scales = op::Constant::create(element::i64, Shape{2}, {2, 2}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto scales = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 2}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -647,9 +651,9 @@ TEST(type_prop, interpolate_v11_scales_incorrect_et) { } TEST(type_prop, interpolate_v11_sizes_incorrect_et) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto sizes = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); - const auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto sizes = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -662,8 +666,8 @@ TEST(type_prop, interpolate_v11_sizes_incorrect_et) { } TEST(type_prop, interpolate_v11_scales_incorrect_number) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {0.2f, 0.2f}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -675,8 +679,8 @@ TEST(type_prop, interpolate_v11_scales_incorrect_number) { } TEST(type_prop, interpolate_v11_sizes_incorrect_number) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto sizes = op::Constant::create(element::i32, Shape{2}, {6, 12}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto sizes = ov::op::v0::Constant::create(element::i32, Shape{2}, {6, 12}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SIZES; @@ -688,8 +692,8 @@ TEST(type_prop, interpolate_v11_sizes_incorrect_number) { } TEST(type_prop, interpolate_v11_scales_not_1d) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto axes = op::Constant::create(element::i32, Shape{2}, {2, 3}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto axes = ov::op::v0::Constant::create(element::i32, Shape{2}, {2, 3}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -698,24 +702,24 @@ TEST(type_prop, interpolate_v11_scales_not_1d) { OV_EXPECT_THROW(std::ignore = std::make_shared( image, - std::make_shared(element::f32, 
Shape{1, 2}), + std::make_shared(element::f32, Shape{1, 2}), axes, attrs), ov::NodeValidationFailure, HasSubstr("Input [1] is not rank 1")); - OV_EXPECT_THROW( - std::ignore = std::make_shared(image, - std::make_shared(element::f32, Shape{}), - axes, - attrs), - ov::NodeValidationFailure, - HasSubstr("Input [1] is not rank 1")); + OV_EXPECT_THROW(std::ignore = std::make_shared( + image, + std::make_shared(element::f32, Shape{}), + axes, + attrs), + ov::NodeValidationFailure, + HasSubstr("Input [1] is not rank 1")); } TEST(type_prop, interpolate_v11_axes_not_1d) { - const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); - const auto scales = op::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); + const auto image = std::make_shared(element::f32, Shape{1, 3, 30, 60}); + const auto scales = ov::op::v0::Constant::create(element::f32, Shape{2}, {6.f, 12.f}); ov::op::util::InterpolateBase::InterpolateAttrs attrs; attrs.shape_calculation_mode = ov::op::util::InterpolateBase::ShapeCalcMode::SCALES; @@ -726,16 +730,16 @@ TEST(type_prop, interpolate_v11_axes_not_1d) { OV_EXPECT_THROW(std::ignore = std::make_shared( image, scales, - std::make_shared(element::i32, Shape{1, 2}), + std::make_shared(element::i32, Shape{1, 2}), attrs), ov::NodeValidationFailure, HasSubstr("Input [2] is not rank 1")); - OV_EXPECT_THROW( - std::ignore = std::make_shared(image, - scales, - std::make_shared(element::i32, Shape{}), - attrs), - ov::NodeValidationFailure, - HasSubstr("Input [2] is not rank 1")); + OV_EXPECT_THROW(std::ignore = std::make_shared( + image, + scales, + std::make_shared(element::i32, Shape{}), + attrs), + ov::NodeValidationFailure, + HasSubstr("Input [2] is not rank 1")); } diff --git a/src/core/tests/type_prop/irdft.cpp b/src/core/tests/type_prop/irdft.cpp index 7eef7ba6fd33b8..b0e448c66be941 100644 --- a/src/core/tests/type_prop/irdft.cpp +++ b/src/core/tests/type_prop/irdft.cpp @@ -14,11 +14,14 @@ // limitations under the License. 
//***************************************************************************** +#include "openvino/op/irdft.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" -using namespace ngraph; +using namespace ov; struct IRDFTConstantAxesAndConstantSignalSizeTestParams { PartialShape input_shape; @@ -35,15 +38,15 @@ struct IRDFTConstantAxesAndConstantSignalSizeTest TEST_P(IRDFTConstantAxesAndConstantSignalSizeTest, irdft_constant_axes_and_signal_size) { auto params = GetParam(); - auto data = std::make_shared(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); std::shared_ptr irdft; if (params.signal_size.empty()) { irdft = std::make_shared(data, axes_input); } else { auto signal_size_input = - op::Constant::create(element::i64, params.signal_size_shape, params.signal_size); + ov::op::v0::Constant::create(element::i64, params.signal_size_shape, params.signal_size); irdft = std::make_shared(data, axes_input, signal_size_input); } @@ -187,8 +190,8 @@ TEST(type_prop, irdft_dynamic_axes) { const auto axes_shape = PartialShape::dynamic(); const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}; - auto data = std::make_shared(element::f32, input_shape); - auto axes_input = std::make_shared(element::i64, axes_shape); + auto data = std::make_shared(element::f32, input_shape); + auto axes_input = std::make_shared(element::i64, axes_shape); auto irdft = std::make_shared(data, axes_input); EXPECT_EQ(irdft->get_element_type(), element::f32); @@ -206,8 +209,8 @@ struct IRDFTNonConstantAxesTest : ::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = std::make_shared(element::i64, params.axes_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = std::make_shared(element::i64, params.axes_shape); auto irdft = std::make_shared(data, axes_input); EXPECT_EQ(irdft->get_element_type(), element::f32); @@ -278,9 +281,9 @@ struct IRDFTNonConstantSignalSizeTest : ::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); - auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); + auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); auto irdft = std::make_shared(data, axes_input, signal_size_input); EXPECT_EQ(irdft->get_element_type(), element::f32); @@ -309,10 +312,10 @@ INSTANTIATE_TEST_SUITE_P( PrintToDummyParamName()); TEST(type_prop, irdft_invalid_input) { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); try { - auto data = std::make_shared(element::f32, Shape{2}); + auto data = std::make_shared(element::f32, Shape{2}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -320,7 +323,7 @@ TEST(type_prop, irdft_invalid_input) { } try { - auto data = std::make_shared(element::f32, 
Shape{4, 3}); + auto data = std::make_shared(element::f32, Shape{4, 3}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -328,7 +331,7 @@ TEST(type_prop, irdft_invalid_input) { } try { - auto data = std::make_shared(element::f32, Shape{4, 2}); + auto data = std::make_shared(element::f32, Shape{4, 2}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -337,10 +340,10 @@ TEST(type_prop, irdft_invalid_input) { } TEST(type_prop, irdft_invalid_axes) { - auto data = std::make_shared(element::f32, Shape{4, 3, 2}); + auto data = std::make_shared(element::f32, Shape{4, 3, 2}); try { - auto axes = op::Constant::create(element::i64, Shape{1}, {3}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {3}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -348,7 +351,7 @@ TEST(type_prop, irdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1}, {-3}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {-3}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -356,7 +359,7 @@ TEST(type_prop, irdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, -2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, -2}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -364,7 +367,7 @@ TEST(type_prop, irdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1}, {2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {2}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -372,7 +375,7 @@ TEST(type_prop, irdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1, 2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1}); auto irdft = std::make_shared(data, axes); FAIL() << "IRDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -381,11 +384,11 @@ TEST(type_prop, irdft_invalid_axes) { } TEST(type_prop, irdft_invalid_signal_size) { - auto data = std::make_shared(element::f32, Shape{4, 3, 2}); - auto axes = op::Constant::create(element::i64, Shape{1}, {0}); + auto data = std::make_shared(element::f32, Shape{4, 3, 2}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {0}); try { - auto signal_size = op::Constant::create(element::i64, Shape{1, 2}, {0, 1}); + auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1}); auto irdft = std::make_shared(data, axes, signal_size); FAIL() << "IRDFT node was created with invalid signal size."; } catch (const NodeValidationFailure& error) { @@ -393,7 +396,7 @@ TEST(type_prop, irdft_invalid_signal_size) { } try { - auto signal_size = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); auto irdft = std::make_shared(data, axes, signal_size); FAIL() << "IRDFT node was created with invalid signal size."; } catch (const 
NodeValidationFailure& error) { @@ -407,9 +410,9 @@ TEST(type_prop, irdft_dynamic_types) { const auto signal_size_shape = PartialShape::dynamic(); const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}; - auto data = std::make_shared(element::dynamic, input_shape); - auto axes_input = std::make_shared(element::dynamic, axes_shape); - auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape); + auto data = std::make_shared(element::dynamic, input_shape); + auto axes_input = std::make_shared(element::dynamic, axes_shape); + auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape); auto irdft = std::make_shared(data, axes_input, signal_size_input); EXPECT_EQ(irdft->get_element_type(), element::dynamic); diff --git a/src/core/tests/type_prop/is_finite.cpp b/src/core/tests/type_prop/is_finite.cpp index a869af2263c3ad..10be851239e6e3 100644 --- a/src/core/tests/type_prop/is_finite.cpp +++ b/src/core/tests/type_prop/is_finite.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/opsets/opset10.hpp" using namespace ov::opset10; diff --git a/src/core/tests/type_prop/is_inf.cpp b/src/core/tests/type_prop/is_inf.cpp index cbecf344b8e01d..04991801a17fe0 100644 --- a/src/core/tests/type_prop/is_inf.cpp +++ b/src/core/tests/type_prop/is_inf.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/opsets/opset10.hpp" using namespace std; diff --git a/src/core/tests/type_prop/is_nan.cpp b/src/core/tests/type_prop/is_nan.cpp index badb13ef41cac2..c3173f992cc732 100644 --- a/src/core/tests/type_prop/is_nan.cpp +++ b/src/core/tests/type_prop/is_nan.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/opsets/opset10.hpp" using namespace std; diff --git a/src/core/tests/type_prop/log_softmax.cpp b/src/core/tests/type_prop/log_softmax.cpp index 0f0192f34d5581..5650990682b54c 100644 --- a/src/core/tests/type_prop/log_softmax.cpp +++ b/src/core/tests/type_prop/log_softmax.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/log_softmax.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, log_softmax) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto log_softmax_func = make_shared(data, 1); EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); EXPECT_EQ(log_softmax_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, log_softmax_incorrect_axis) { - const auto data = make_shared(element::f32, Shape{1, 3, 6}); + const auto data = make_shared(element::f32, Shape{1, 3, 6}); try { auto log_softmax_func = make_shared(data, 3); @@ -29,7 +29,7 @@ TEST(type_prop, log_softmax_incorrect_axis) { // TEST(type_prop, log_softmax_partial) // { -// auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); +// auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); // auto log_softmax_func = make_shared(data, 1); // EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); // 
ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme( @@ -37,13 +37,13 @@ TEST(type_prop, log_softmax_incorrect_axis) { // // // rank unknown // auto log_softmax_partial = make_shared( -// make_shared(element::f32, PartialShape::dynamic())); +// make_shared(element::f32, PartialShape::dynamic())); // ASSERT_TRUE( // log_softmax_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); // } TEST(type_prop, log_softmax_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto log_softmax_func = make_shared(data, 1); EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); diff --git a/src/core/tests/type_prop/logical_and.cpp b/src/core/tests/type_prop/logical_and.cpp index 81360419f6850e..a16eafa51311c5 100644 --- a/src/core/tests/type_prop/logical_and.cpp +++ b/src/core/tests/type_prop/logical_and.cpp @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_and.hpp" + #include "common_test_utils/type_prop.hpp" #include "logical_ops.hpp" -using Type = ::testing::Types>; +using Type = ::testing::Types>; INSTANTIATE_TYPED_TEST_SUITE_P(Type_prop_test, LogicalOperatorTypeProp, Type, LogicalOperatorTypeName); diff --git a/src/core/tests/type_prop/logical_not.cpp b/src/core/tests/type_prop/logical_not.cpp index e051d29a35d68a..bcd27c48c7f029 100644 --- a/src/core/tests/type_prop/logical_not.cpp +++ b/src/core/tests/type_prop/logical_not.cpp @@ -2,12 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_not.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; using LogicalNotTestParam = std::tuple; @@ -23,7 +25,7 @@ const auto dynamic_shapes = Values(PartialShape{Dimension::dynamic(), Dimension: PartialShape::dynamic()); } // namespace -class LogicalNotTest : public TypePropOpTest, public WithParamInterface { +class LogicalNotTest : public TypePropOpTest, public WithParamInterface { protected: void SetUp() override { std::tie(exp_type, exp_shape) = GetParam(); @@ -43,7 +45,7 @@ INSTANTIATE_TEST_SUITE_P(type_prop_dynamic_static_rank, PrintToStringParamName()); TEST_P(LogicalNotTest, propagate_dimensions) { - const auto input = std::make_shared(exp_type, exp_shape); + const auto input = std::make_shared(exp_type, exp_shape); const auto op = make_op(input); EXPECT_EQ(op->get_element_type(), exp_type); @@ -57,7 +59,7 @@ TEST_P(LogicalNotTest, propagate_labels) { } const auto exp_labels = get_shape_labels(exp_shape); - const auto input = std::make_shared(exp_type, exp_shape); + const auto input = std::make_shared(exp_type, exp_shape); const auto op = make_op(input); EXPECT_EQ(get_shape_labels(op->get_output_partial_shape(0)), exp_labels); @@ -65,7 +67,7 @@ TEST_P(LogicalNotTest, propagate_labels) { TEST_P(LogicalNotTest, default_ctor) { const auto op = std::make_shared(); - const auto input = std::make_shared(exp_type, exp_shape); + const auto input = std::make_shared(exp_type, exp_shape); op->set_argument(0, input); op->validate_and_infer_types(); diff --git a/src/core/tests/type_prop/logical_ops.hpp b/src/core/tests/type_prop/logical_ops.hpp index 881c750d86b280..b3e226e5ecb890 100644 --- 
a/src/core/tests/type_prop/logical_ops.hpp +++ b/src/core/tests/type_prop/logical_ops.hpp @@ -6,14 +6,13 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" -template +template class LogicalOperatorType { public: using op_type = T; - static constexpr ngraph::element::Type_t element_type = ELEMENT_TYPE; + static constexpr ov::element::Type_t element_type = ELEMENT_TYPE; }; template @@ -27,7 +26,7 @@ class LogicalOperatorTypeName { template static std::string GetName(int) { using OP_Type = typename T::op_type; - const ngraph::Node::type_info_t typeinfo = OP_Type::get_type_info_static(); + const ov::Node::type_info_t typeinfo = OP_Type::get_type_info_static(); return typeinfo.name; } }; @@ -36,15 +35,15 @@ TYPED_TEST_SUITE_P(LogicalOperatorTypeProp); namespace { template -void incorrect_init(const ngraph::element::Type& type, +void incorrect_init(const ov::element::Type& type, const std::string& err, - const ngraph::Shape& shape1 = {1, 3, 6}, - const ngraph::Shape& shape2 = {1, 3, 6}) { - auto input1 = std::make_shared(type, shape1); - auto input2 = std::make_shared(type, shape2); + const ov::Shape& shape1 = {1, 3, 6}, + const ov::Shape& shape2 = {1, 3, 6}) { + auto input1 = std::make_shared(type, shape1); + auto input2 = std::make_shared(type, shape2); try { auto op = std::make_shared(input1, input2); - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), err); } } @@ -52,52 +51,52 @@ void incorrect_init(const ngraph::element::Type& type, TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_f32) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::f32, + incorrect_init(ov::element::f32, "Operands for logical operators must have boolean element type but have element type f32"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_f64) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::f64, + incorrect_init(ov::element::f64, "Operands for logical operators must have boolean element type but have element type f64"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_i32) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::i32, + incorrect_init(ov::element::i32, "Operands for logical operators must have boolean element type but have element type i32"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_i64) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::i64, + incorrect_init(ov::element::i64, "Operands for logical operators must have boolean element type but have element type i64"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_u32) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::u32, + incorrect_init(ov::element::u32, "Operands for logical operators must have boolean element type but have element type u32"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_type_u64) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::u64, + incorrect_init(ov::element::u64, "Operands for logical operators must have boolean element type but have element type u64"); } TYPED_TEST_P(LogicalOperatorTypeProp, incorrect_shape) { using OP_Type = typename TypeParam::op_type; - incorrect_init(ngraph::element::boolean, + incorrect_init(ov::element::boolean, "Argument shapes are inconsistent", - ngraph::Shape{1, 3, 6}, 
- ngraph::Shape{1, 2, 3}); + ov::Shape{1, 3, 6}, + ov::Shape{1, 2, 3}); } TYPED_TEST_P(LogicalOperatorTypeProp, inputs_have_different_types) { - using namespace ngraph; - const auto a = std::make_shared(element::boolean, PartialShape{1, 1, 6}); - const auto b = std::make_shared(element::f16, PartialShape{1, 3, 1}); + using namespace ov; + const auto a = std::make_shared(element::boolean, PartialShape{1, 1, 6}); + const auto b = std::make_shared(element::f16, PartialShape{1, 3, 1}); OV_EXPECT_THROW(const auto logical_op = this->make_op(a, b), NodeValidationFailure, @@ -105,9 +104,9 @@ TYPED_TEST_P(LogicalOperatorTypeProp, inputs_have_different_types) { } TYPED_TEST_P(LogicalOperatorTypeProp, inputs_have_inconsistent_shapes) { - using namespace ngraph; - const auto a = std::make_shared(element::boolean, PartialShape{1, 1, 6}); - const auto b = std::make_shared(element::boolean, PartialShape{1, 3, 3}); + using namespace ov; + const auto a = std::make_shared(element::boolean, PartialShape{1, 1, 6}); + const auto b = std::make_shared(element::boolean, PartialShape{1, 3, 3}); OV_EXPECT_THROW(const auto logical_op = this->make_op(a, b), NodeValidationFailure, @@ -115,11 +114,11 @@ TYPED_TEST_P(LogicalOperatorTypeProp, inputs_have_inconsistent_shapes) { } TYPED_TEST_P(LogicalOperatorTypeProp, shape_broadcast) { - using namespace ngraph; + using namespace ov; const auto exp_dtype = TypeParam::element_type; - const auto a = std::make_shared(element::boolean, Shape{1, 1, 6}); - const auto b = std::make_shared(element::boolean, Shape{1, 3, 1}); + const auto a = std::make_shared(element::boolean, Shape{1, 1, 6}); + const auto b = std::make_shared(element::boolean, Shape{1, 3, 1}); const auto logical_op = this->make_op(a, b); @@ -129,7 +128,7 @@ TYPED_TEST_P(LogicalOperatorTypeProp, shape_broadcast) { } TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_no_broadcast) { - using namespace ngraph; + using namespace ov; using namespace testing; auto shape_a = PartialShape{1, {2, 4}, {2, 5}, 4, -1}; @@ -138,8 +137,8 @@ TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_no_broadcast) { set_shape_labels(shape_b, ov::TensorLabel{20, 21, ov::no_label, ov::no_label, ov::no_label}); const auto exp_shape = PartialShape{1, 3, {2, 5}, 4, {-1, 5}}; - const auto a = std::make_shared(element::boolean, shape_a); - const auto b = std::make_shared(element::boolean, shape_b); + const auto a = std::make_shared(element::boolean, shape_a); + const auto b = std::make_shared(element::boolean, shape_b); EXPECT_THAT(this->make_op(a, b, "NONE")->get_output_partial_shape(0), AllOf(Eq(exp_shape), ResultOf(get_shape_labels, ElementsAre(20, 21, 12, ov::no_label, 14)))); @@ -149,7 +148,7 @@ TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_no_broadcast) { } TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_numpy_broadcast) { - using namespace ngraph; + using namespace ov; using namespace testing; auto shape_a = PartialShape{1, {2, 4}, {2, 5}, 4, -1}; @@ -158,8 +157,8 @@ TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_numpy_broadcast) { set_shape_labels(shape_b, ov::TensorLabel{20, 21, ov::no_label, 23}); const auto exp_shape = PartialShape{1, {2, 4}, 3, 4, 4}; - const auto a = std::make_shared(element::boolean, shape_a); - const auto b = std::make_shared(element::boolean, shape_b); + const auto a = std::make_shared(element::boolean, shape_a); + const auto b = std::make_shared(element::boolean, shape_b); EXPECT_THAT(this->make_op(a, b, "NUMPY")->get_output_partial_shape(0), AllOf(Eq(exp_shape), ResultOf(get_shape_labels, 
ElementsAre(ov::no_label, 11, 21, 13, 23)))); @@ -169,11 +168,11 @@ TYPED_TEST_P(LogicalOperatorTypeProp, partial_shape_numpy_broadcast) { } TYPED_TEST_P(LogicalOperatorTypeProp, default_ctor) { - using namespace ngraph; + using namespace ov; const auto op = this->make_op(); - const auto a = std::make_shared(element::boolean, PartialShape{1, {2, 4}, {2, 5}, 4, -1}); - const auto b = std::make_shared(element::boolean, PartialShape{1, 3, {1, 6}, 4}); + const auto a = std::make_shared(element::boolean, PartialShape{1, {2, 4}, {2, 5}, 4, -1}); + const auto b = std::make_shared(element::boolean, PartialShape{1, 3, {1, 6}, 4}); op->set_arguments(NodeVector{a, b}); op->set_autob("NUMPY"); diff --git a/src/core/tests/type_prop/logical_or.cpp b/src/core/tests/type_prop/logical_or.cpp index f64a8d10a45fd3..a8984b150891ad 100644 --- a/src/core/tests/type_prop/logical_or.cpp +++ b/src/core/tests/type_prop/logical_or.cpp @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_or.hpp" + #include "common_test_utils/type_prop.hpp" #include "logical_ops.hpp" -using Type = ::testing::Types>; +using Type = ::testing::Types>; INSTANTIATE_TYPED_TEST_SUITE_P(Type_prop_test, LogicalOperatorTypeProp, Type, LogicalOperatorTypeName); diff --git a/src/core/tests/type_prop/logical_xor.cpp b/src/core/tests/type_prop/logical_xor.cpp index 3c0fa9fbd16342..06e67fa1db76ac 100644 --- a/src/core/tests/type_prop/logical_xor.cpp +++ b/src/core/tests/type_prop/logical_xor.cpp @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_xor.hpp" + #include "common_test_utils/type_prop.hpp" #include "logical_ops.hpp" -using Type = ::testing::Types>; +using Type = ::testing::Types>; INSTANTIATE_TYPED_TEST_SUITE_P(Type_prop_test, LogicalOperatorTypeProp, Type, LogicalOperatorTypeName); diff --git a/src/core/tests/type_prop/loop.cpp b/src/core/tests/type_prop/loop.cpp index 73a9b6c70cb984..124c81b14c37b0 100644 --- a/src/core/tests/type_prop/loop.cpp +++ b/src/core/tests/type_prop/loop.cpp @@ -2,13 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/loop.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/tensor_iterator.hpp" +#include "openvino/op/unsqueeze.hpp" using namespace std; -using namespace ngraph; +using namespace ov; // trip_count = 10 // execution_condition = true @@ -16,46 +25,46 @@ using namespace ngraph; // all shapes are static, 10 iterations will be executed TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, 
PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{1}, true); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); loop->set_merged_input(M_body, M, Zo); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); // check input descriptors for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -71,22 +80,22 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; Shape out1_shape{32, 1, 10}; Shape out2_shape{32, 10, 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_shape(0), out2_shape); @@ -102,29 +111,29 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { // will be executed only 1 
iteration, all shapes are static TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, false); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{1}, false); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -134,13 +143,13 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -156,22 +165,22 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc 
= ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; Shape out1_shape{32, 1, 10}; Shape out2_shape{32, 1, 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_shape(0), out2_shape); @@ -187,29 +196,29 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { // concat output is not provided, another outputs will be static TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::f32, Shape{1}); + auto Y = make_shared(element::f32, Shape{1}); + auto M = make_shared(element::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); - auto body_condition = std::make_shared(M_body, condition_const); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared(element::f32, Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, ParameterVector{Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = make_shared(OutputVector{body_condition, Zo}, ParameterVector{Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -219,13 +228,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if 
(std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -238,20 +247,20 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); Shape out0_shape{1}; Shape out1_shape{1}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); @@ -265,30 +274,30 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes // concat output has only dynamic rank, another outputs are static TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::f32, Shape{1}); + auto Y = make_shared(element::f32, Shape{1}); + auto M = make_shared(element::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); - auto body_condition = std::make_shared(M_body, condition_const); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared(element::f32, Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, 
ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -298,13 +307,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -318,22 +327,22 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; Shape out1_shape{1}; PartialShape out2_shape{PartialShape::dynamic(1)}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_partial_shape(0), out2_shape); @@ -350,30 +359,30 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape // concat output has dynamic dimension on axis position, another outputs are static TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto Y = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto Y = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto M = make_shared(element::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); - auto 
body_condition = std::make_shared(M_body, condition_const); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared(element::f32, Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -383,13 +392,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -404,22 +413,22 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; PartialShape out1_shape{Dimension::dynamic()}; PartialShape out2_shape{Dimension::dynamic()}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_partial_shape(0), out1_shape); EXPECT_EQ(result2->get_output_partial_shape(0), out2_shape); @@ 
-436,30 +445,30 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn // Axis of silced output is set as incorrect TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_incorrect_sliced_output_axis) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{1, 2, 3, Dimension::dynamic()}); - auto Y = make_shared(element::f32, PartialShape{1, 2, 3, Dimension::dynamic()}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::f32, PartialShape{1, 2, 3, Dimension::dynamic()}); + auto Y = make_shared(element::f32, PartialShape{1, 2, 3, Dimension::dynamic()}); + auto M = make_shared(element::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); - auto body_condition = std::make_shared(M_body, condition_const); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared(element::f32, Shape{1}, 10); + auto body_condition = std::make_shared(M_body, condition_const); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -482,29 +491,29 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_incorrect_sli // concat output will be dynamic, another outputs are static TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = 
make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, -1); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{1}, true); + + auto trip_count = std::make_shared(element::i64, Shape{1}, -1); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{body_condition, Zo}, - ParameterVector{current_iteration, Xi, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{body_condition, Zo}, ParameterVector{current_iteration, Xi, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -514,13 +523,13 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -536,22 +545,22 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; Shape out1_shape{32, 1, 10}; PartialShape out2_shape{32, Dimension::dynamic(), 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_partial_shape(0), out2_shape); @@ 
-568,29 +577,29 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { // all shapes are static, 10 iterations will be executed TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); + auto current_iteration = make_shared(element::i64, Shape{1}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{1}, true); + + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{1}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{Zo, body_condition}, - ParameterVector{Xi, current_iteration, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{Zo, body_condition}, ParameterVector{Xi, current_iteration, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{1, 1}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -600,13 +609,13 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -622,22 +631,22 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); 
EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{1}; Shape out1_shape{32, 1, 10}; Shape out2_shape{32, 10, 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_shape(0), out2_shape); @@ -654,29 +663,29 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports // all shapes are static, 10 iterations will be executed TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports_scalars) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 1, 10}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{}, true); + + auto trip_count = std::make_shared(element::i64, Shape{}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{Zo, body_condition}, - ParameterVector{Xi, current_iteration, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{Zo, body_condition}, ParameterVector{Xi, current_iteration, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{1, 1}); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); @@ -686,13 +695,13 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) 
{ - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -708,22 +717,22 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{}; Shape out1_shape{32, 1, 10}; Shape out2_shape{32, 10, 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_shape(0), out2_shape); @@ -740,29 +749,29 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports // all shapes are static, 10 iterations will be executed TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 10, 1}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::f32, Shape{32, 10, 1}); + auto M = make_shared(element::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); - - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared(element::boolean, Shape{}, true); + + auto trip_count = std::make_shared(element::i64, Shape{}, 10); + auto exec_condition = std::make_shared(element::boolean, Shape{}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{Zo, body_condition, sum}, - ParameterVector{Xi, 
current_iteration, Yi, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{Zo, body_condition, sum}, ParameterVector{Xi, current_iteration, Yi, M_body}); - auto loop = make_shared(trip_count, exec_condition); + auto loop = make_shared(trip_count, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{1, 1}); loop->set_sliced_input(Xi, X, 0, 1, 1, -1, 2); loop->set_sliced_input(Yi, Y, -1, -1, 1, 0, 1); @@ -772,13 +781,13 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -795,24 +804,24 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); - auto result3 = make_shared(out3); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + auto result3 = make_shared(out3); Shape out0_shape{}; Shape out1_shape{32, 1, 10}; Shape out2_shape{32, 10, 10}; Shape out3_shape{32, 1, 1}; auto results = ResultVector{result0, result1, result2, result3}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); EXPECT_EQ(result2->get_output_shape(0), out2_shape); @@ -832,30 +841,30 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { // count will be executed TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_concatenated_outputs) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 10}); - auto Y = make_shared(element::f32, PartialShape{32, Dimension::dynamic(), 10}); - auto M = make_shared(element::f32, PartialShape{32, 1, 10}); - auto T = make_shared(element::i64, Shape{}); + auto X = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 10}); + auto Y = make_shared(element::f32, PartialShape{32, Dimension::dynamic(), 10}); + auto M = make_shared(element::f32, PartialShape{32, 1, 10}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto 
current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto sum = make_shared(Xi, Yi); - auto Zo = make_shared(sum, M_body); - auto body = make_shared(OutputVector{Zo, body_condition, sum}, - ParameterVector{Xi, Yi, current_iteration, M_body}); + auto sum = make_shared(Xi, Yi); + auto Zo = make_shared(sum, M_body); + auto body = + make_shared(OutputVector{Zo, body_condition, sum}, ParameterVector{Xi, Yi, current_iteration, M_body}); - auto loop = make_shared(T, exec_condition); + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{2, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{2, 1}); loop->set_sliced_input(Xi, X, 0, 1, 1, -1, 0); loop->set_sliced_input(Yi, Y, -1, -1, 1, 0, 1); @@ -865,13 +874,13 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -888,24 +897,24 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); - auto result3 = make_shared(out3); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + auto result3 = make_shared(out3); Shape out0_shape{}; Shape out1_shape{32, 1, 10}; PartialShape out2_shape{32, Dimension::dynamic(), 10}; Shape out3_shape{32, 1, 10}; auto results = ResultVector{result0, result1, result2, result3}; - auto f = make_shared(results, ParameterVector{X, Y, T, M}); + auto f = make_shared(results, ParameterVector{X, Y, T, M}); EXPECT_EQ(f->get_output_size(), 4); 
EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); @@ -937,24 +946,25 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c // count will be executed TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concatenated_outputs) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 10}); - auto T = make_shared(element::i64, Shape{}); + auto X = + make_shared(element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 10}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::i64, Shape{}); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto Zo = make_shared(Xi, Xi); - auto body = make_shared(OutputVector{Zo, body_condition}, ParameterVector{Xi, current_iteration}); + auto Zo = make_shared(Xi, Xi); + auto body = make_shared(OutputVector{Zo, body_condition}, ParameterVector{Xi, current_iteration}); - auto loop = make_shared(T, exec_condition); + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{1, 1}); loop->set_sliced_input(Xi, X, 0, 1, 1, -1, 0); @@ -962,13 +972,13 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -984,22 +994,22 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); Shape out0_shape{}; PartialShape out1_shape{1, Dimension::dynamic(), 10}; PartialShape out2_shape{Dimension::dynamic(), 
Dimension::dynamic(), 10}; auto results = ResultVector{result0, result1, result2}; - auto f = make_shared(results, ParameterVector{X, T}); + auto f = make_shared(results, ParameterVector{X, T}); EXPECT_EQ(f->get_output_size(), 3); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_partial_shape(0), out1_shape); @@ -1025,39 +1035,39 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate // parameter, other shapes are static TEST(type_prop, loop_operation_dynamic_iter_static_shapes_inputs_dynamic_shape_outputs) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{1, 1, 10}); - auto T = make_shared(element::i64, Shape{}); + auto X = make_shared(element::f32, PartialShape{1, 1, 10}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi) -> Concat(Xi, C) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 10}); + auto Xi = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 10}); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto C = opset5::Constant::create(element::f32, {1, 1, 10}, {0}); - auto Zo = make_shared(NodeVector{Xi, C}, 1); - auto Z = make_shared(Zo); - auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); + auto C = ov::op::v0::Constant::create(element::f32, {1, 1, 10}, {0}); + auto Zo = make_shared(NodeVector{Xi, C}, 1); + auto Z = make_shared(Zo); + auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); - auto loop = make_shared(T, exec_condition); + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{-1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 1}); loop->set_merged_input(Xi, X, Z); // check input descriptors for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -1070,20 +1080,20 @@ TEST(type_prop, loop_operation_dynamic_iter_static_shapes_inputs_dynamic_shape_o for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 
= make_shared(out0); - auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); Shape out0_shape{}; PartialShape out1_shape{1, Dimension::dynamic(), 10}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, T}); + auto f = make_shared(results, ParameterVector{X, T}); EXPECT_EQ(f->get_output_size(), 2); EXPECT_EQ(result0->get_output_shape(0), out0_shape); // should be dynamic @@ -1109,38 +1119,38 @@ TEST(type_prop, loop_operation_dynamic_iter_static_shapes_inputs_dynamic_shape_o // parameter, one dynamic shape and one static shape TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_inputs_dynamic_shape_outputs) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{-1, 1, 10}); - auto T = make_shared(element::i64, Shape{}); + auto X = make_shared(element::f32, PartialShape{-1, 1, 10}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi) -> Concat(Xi, Xi, 1) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape{-1, -1, 10}); + auto Xi = make_shared(element::f32, PartialShape{-1, -1, 10}); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto Zo = make_shared(NodeVector{Xi, Xi}, 1); - auto Z = make_shared(Zo); - auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); + auto Zo = make_shared(NodeVector{Xi, Xi}, 1); + auto Z = make_shared(Zo); + auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); - auto loop = make_shared(T, exec_condition); + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{-1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 1}); loop->set_merged_input(Xi, X, Z); // check input descriptors for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -1153,20 +1163,20 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_inputs_dynamic_shape_ for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - 
auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); Shape out0_shape{}; PartialShape out1_shape{-1, -1, 10}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, T}); + auto f = make_shared(results, ParameterVector{X, T}); EXPECT_EQ(f->get_output_size(), 2); EXPECT_EQ(result0->get_output_shape(0), out0_shape); // should be dynamic @@ -1209,31 +1219,31 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_inputs_dynamic_shape_ // backedge to Parameter1 backedge to Parameter2 TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes2_inputs_dynamic_shape_outputs3) { // That which we iterate over - auto X0 = make_shared(element::f32, PartialShape{-1, 1, 10}); - auto X1 = make_shared(element::f32, PartialShape{-1, 1, 10}); - auto T = make_shared(element::i64, Shape{}); + auto X0 = make_shared(element::f32, PartialShape{-1, 1, 10}); + auto X1 = make_shared(element::f32, PartialShape{-1, 1, 10}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi0) -> Concat(Xi0, Xi0, 1) -> (Zo0) // (Xi1) -> Concat(Xi1, Xi1, 1) -> (Zo1) // Body parameters - auto Xi0 = make_shared(element::f32, PartialShape{-1, 1, 10}); - auto Xi1 = make_shared(element::f32, PartialShape{-1, 1, 10}); + auto Xi0 = make_shared(element::f32, PartialShape{-1, 1, 10}); + auto Xi1 = make_shared(element::f32, PartialShape{-1, 1, 10}); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto Zo0 = make_shared(NodeVector{Xi0, Xi0}, 1); - auto Zo1 = make_shared(NodeVector{Xi1, Xi1}, 1); - auto Y = make_shared(Zo0); - auto Z0 = make_shared(Zo0); - auto Z1 = make_shared(Zo1); - auto body = make_shared(OutputVector{Y, Z0, Z1, body_condition}, ParameterVector{Xi0, Xi1}); - - auto loop = make_shared(T, exec_condition); + auto Zo0 = make_shared(NodeVector{Xi0, Xi0}, 1); + auto Zo1 = make_shared(NodeVector{Xi1, Xi1}, 1); + auto Y = make_shared(Zo0); + auto Z0 = make_shared(Zo0); + auto Z1 = make_shared(Zo1); + auto body = make_shared(OutputVector{Y, Z0, Z1, body_condition}, ParameterVector{Xi0, Xi1}); + + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{-1, 3}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 3}); loop->set_merged_input(Xi0, X0, Z0); loop->set_merged_input(Xi1, X1, Z1); @@ -1241,13 +1251,13 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes2_inputs_dynamic_shape for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); 
EXPECT_NE(input_desc, nullptr); } } @@ -1262,22 +1272,22 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes2_inputs_dynamic_shape for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); - auto result2 = make_shared(out2); - auto result3 = make_shared(out3); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); + auto result2 = make_shared(out2); + auto result3 = make_shared(out3); Shape out0_shape{}; PartialShape out1_shape{-1, -1, 10}; auto results = ResultVector{result0, result1, result2, result3}; - auto f = make_shared(results, ParameterVector{X0, X1, T}); + auto f = make_shared(results, ParameterVector{X0, X1, T}); EXPECT_EQ(f->get_output_size(), 4); EXPECT_EQ(result0->get_output_shape(0), out0_shape); // should be dynamic @@ -1308,39 +1318,39 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes2_inputs_dynamic_shape // parameter, one dynamic shape and one static shape TEST(type_prop, loop_operation_dynamic_iter_1d_shapes_inputs_dynamic_shape_outputs) { // That which we iterate over - auto X = make_shared(element::f32, PartialShape{1}); - auto T = make_shared(element::i64, Shape{}); + auto X = make_shared(element::f32, PartialShape{1}); + auto T = make_shared(element::i64, Shape{}); // Set up the cell body, a function from (Xi) -> Concat(Xi, Xi, 1) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape{1}); + auto Xi = make_shared(element::f32, PartialShape{1}); - auto body_condition = make_shared(element::boolean, Shape{}, true); - auto trip_count = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); - auto exec_condition = make_shared(element::boolean, Shape{}, true); + auto body_condition = make_shared(element::boolean, Shape{}, true); + auto trip_count = std::make_shared(element::i64, Shape{1}, 10); + auto exec_condition = make_shared(element::boolean, Shape{}, true); // Body - auto X0 = make_shared(Xi, opset5::Constant::create(ov::element::i32, {1}, {-1}), false); - auto Zo = make_shared(NodeVector{X0, X0}, 0); - auto Z = make_shared(Zo); - auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); + auto X0 = make_shared(Xi, ov::op::v0::Constant::create(ov::element::i32, {1}, {-1}), false); + auto Zo = make_shared(NodeVector{X0, X0}, 0); + auto Z = make_shared(Zo); + auto body = make_shared(OutputVector{Z, body_condition}, ParameterVector{Xi}); - auto loop = make_shared(T, exec_condition); + auto loop = make_shared(T, exec_condition); loop->set_function(body); - loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{-1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 1}); loop->set_merged_input(Xi, X, Z); // check input descriptors for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) 
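// NOTE (illustrative, not part of the patch): the angle-bracket template arguments inside
// make_shared<...> and as_type_ptr<...> did not survive this plain-text rendering of the diff,
// so the removed and added lines above look identical. The sketch below restores one Loop test
// pattern with the ov::op::vN types this PR migrates to; the test name and the exact scoping of
// the description classes (ov::op::util::MultiSubGraphOp::*) are assumptions, not quotes.
#include <cstring>
#include <gtest/gtest.h>
#include "openvino/core/model.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/loop.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

TEST(type_prop_sketch, loop_merged_input_pattern) {
    using namespace ov;
    // Outer values: the tensor carried across iterations and the trip count.
    auto X = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{1, 1, 10});
    auto T = std::make_shared<op::v0::Parameter>(element::i64, Shape{});
    // Body: Zo = Concat(Xi, C, 1); the body condition is a constant `true`.
    auto Xi = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 10});
    auto body_condition = std::make_shared<op::v0::Constant>(element::boolean, Shape{}, true);
    auto exec_condition = std::make_shared<op::v0::Constant>(element::boolean, Shape{}, true);
    auto C = op::v0::Constant::create(element::f32, {1, 1, 10}, {0});
    auto Zo = std::make_shared<op::v0::Concat>(NodeVector{Xi, C}, 1);
    auto Z = std::make_shared<op::v0::Result>(Zo);
    auto body = std::make_shared<Model>(OutputVector{Z, body_condition}, ParameterVector{Xi});
    // Loop: no current-iteration body parameter (-1); body output #1 is the condition.
    auto loop = std::make_shared<op::v5::Loop>(T, exec_condition);
    loop->set_function(body);
    loop->set_special_body_ports(op::v5::Loop::SpecialBodyPorts{-1, 1});
    loop->set_merged_input(Xi, X, Z);
    // Requesting the final value of Z triggers shape/type propagation through the body.
    auto out = loop->get_iter_value(Z, -1);
    EXPECT_EQ(out.get_element_type(), element::f32);
    // A merged input must be described by a MergedInputDescription.
    for (const auto& desc : loop->get_input_descriptions()) {
        if (std::strcmp(desc->get_type_info().name, "MergedInputDescription") == 0) {
            auto merged = ov::as_type_ptr<op::util::MultiSubGraphOp::MergedInputDescription>(desc);
            EXPECT_NE(merged, nullptr);
        }
    }
}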
{ - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -1353,20 +1363,20 @@ TEST(type_prop, loop_operation_dynamic_iter_1d_shapes_inputs_dynamic_shape_outpu for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); Shape out0_shape{}; PartialShape out1_shape{-1}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, T}); + auto f = make_shared(results, ParameterVector{X, T}); EXPECT_EQ(f->get_output_size(), 2); EXPECT_EQ(result0->get_output_shape(0), out0_shape); // should be dynamic @@ -1395,25 +1405,26 @@ TEST(type_prop, loop_operation_dynamic_iter_1d_shapes_inputs_dynamic_shape_outpu // } TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_unsqueeze) { // Inner model - const auto inner_parameter = std::make_shared(element::dynamic, ov::PartialShape::dynamic()); + const auto inner_parameter = std::make_shared(element::dynamic, ov::PartialShape::dynamic()); const auto unsqueeze = - std::make_shared(inner_parameter, opset5::Constant::create(element::i64, {1}, {0})); - const auto true_const = opset5::Constant::create(element::boolean, {1}, {1}); - auto body = std::make_shared(OutputVector{unsqueeze, true_const}, ParameterVector{inner_parameter}); + std::make_shared(inner_parameter, ov::op::v0::Constant::create(element::i64, {1}, {0})); + const auto true_const = ov::op::v0::Constant::create(element::boolean, {1}, {1}); + auto body = std::make_shared(OutputVector{unsqueeze, true_const}, ParameterVector{inner_parameter}); // Outer model - const auto outer_parameter = std::make_shared(element::dynamic, ov::PartialShape::dynamic(2)); + const auto outer_parameter = + std::make_shared(element::dynamic, ov::PartialShape::dynamic(2)); - const auto trip_count = opset5::Constant::create(element::i64, {1}, {-1}); - const auto execution_condition = opset5::Constant::create(element::boolean, {1}, {1}); - const auto loop = std::make_shared(trip_count, execution_condition); + const auto trip_count = ov::op::v0::Constant::create(element::i64, {1}, {-1}); + const auto execution_condition = ov::op::v0::Constant::create(element::boolean, {1}, {1}); + const auto loop = std::make_shared(trip_count, execution_condition); loop->set_function(body); loop->set_merged_input(inner_parameter, outer_parameter, unsqueeze); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 1}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 1}); - auto outer_result = make_shared(loop->get_iter_value(unsqueeze, -1)); + auto outer_result = make_shared(loop->get_iter_value(unsqueeze, -1)); - auto outer_model = std::make_shared(ResultVector{outer_result}, ParameterVector{outer_parameter}); + auto outer_model = 
std::make_shared(ResultVector{outer_result}, ParameterVector{outer_parameter}); PartialShape outer_shape = PartialShape::dynamic(); EXPECT_EQ(outer_model->get_output_size(), 1); EXPECT_EQ(outer_result->get_output_partial_shape(0), outer_shape); diff --git a/src/core/tests/type_prop/lrn.cpp b/src/core/tests/type_prop/lrn.cpp index 19e47bf6bd1f28..22458274cac023 100644 --- a/src/core/tests/type_prop/lrn.cpp +++ b/src/core/tests/type_prop/lrn.cpp @@ -2,20 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/lrn.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, lrn_invalid_axes_rank) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes = make_shared(element::f32, Shape{1, 2}); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto axes = make_shared(element::f32, Shape{1, 2}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try { - auto lrn = make_shared(data, axes, alpha, beta, bias, size); + auto lrn = make_shared(data, axes, alpha, beta, bias, size); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank not detected"; } catch (const NodeValidationFailure& error) { @@ -24,9 +25,9 @@ TEST(type_prop, lrn_invalid_axes_rank) { FAIL() << "Deduced type check failed for unexpected reason"; } - axes = make_shared(element::f32, Shape{5}); + axes = make_shared(element::f32, Shape{5}); try { - auto lrn = make_shared(data, axes, alpha, beta, bias, size); + auto lrn = make_shared(data, axes, alpha, beta, bias, size); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank not detected"; } catch (const NodeValidationFailure& error) { @@ -37,12 +38,12 @@ TEST(type_prop, lrn_invalid_axes_rank) { } TEST(type_prop, lrn_incorrect_axes_value) { - auto data = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try { - auto lrn = make_shared(data, axes, alpha, beta, bias, size); + auto lrn = make_shared(data, axes, alpha, beta, bias, size); // Should have thrown, so fail if it didn't FAIL() << "Invalid input tensor rank not detected"; } catch (const NodeValidationFailure& error) { diff --git a/src/core/tests/type_prop/lstm_cell.cpp b/src/core/tests/type_prop/lstm_cell.cpp index e81db6aacbcc51..c39ea6d7e23310 100644 --- a/src/core/tests/type_prop/lstm_cell.cpp +++ b/src/core/tests/type_prop/lstm_cell.cpp @@ -3,12 +3,10 @@ // #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "openvino/opsets/opset4.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, lstm_cell) { const size_t batch_size = 2; diff --git a/src/core/tests/type_prop/lstm_sequence.cpp b/src/core/tests/type_prop/lstm_sequence.cpp index e5acbb6eae5339..d92b294bb42932 100644 --- a/src/core/tests/type_prop/lstm_sequence.cpp +++ b/src/core/tests/type_prop/lstm_sequence.cpp @@ -3,13 +3,11 @@ // #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/opsets/opset1.hpp" 
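// Illustrative only: the lrn.cpp hunk above checks that a non-1D `axes` input is rejected, but
// the template arguments were stripped in this rendering. A minimal sketch of that negative
// test, assuming the ov::op::v0::LRN / ov::op::v0::Parameter types opset1 maps to in the new
// API; plain EXPECT_THROW stands in for the try/catch + substring check used by the real test.
#include <gtest/gtest.h>
#include "openvino/op/lrn.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_sketch, lrn_invalid_axes_rank_pattern) {
    using namespace ov;
    auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3, 4});
    // Axes must be a 1D tensor; a 2D axes input has to fail shape inference.
    auto axes = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
    const double alpha = 0.1, beta = 0.2, bias = 0.3;
    const size_t size = 3;
    EXPECT_THROW(std::make_shared<op::v0::LRN>(data, axes, alpha, beta, bias, size),
                 ov::NodeValidationFailure);
}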
+#include "openvino/opsets/opset5.hpp" using namespace std; -using namespace ngraph; +using namespace ov; // // RNN sequence parameters diff --git a/src/core/tests/type_prop/matmul.cpp b/src/core/tests/type_prop/matmul.cpp index 3966804d1bed7d..41d3789601d086 100644 --- a/src/core/tests/type_prop/matmul.cpp +++ b/src/core/tests/type_prop/matmul.cpp @@ -2,100 +2,105 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/matmul.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, matmul_2D_same) { - auto A = make_shared(element::f32, Shape{2, 2}); - auto B = make_shared(element::f32, Shape{2, 2}); + auto A = make_shared(element::f32, Shape{2, 2}); + auto B = make_shared(element::f32, Shape{2, 2}); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2})); } TEST(type_prop, matmul_4D_same) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 3, 3}); + auto A = make_shared(element::f32, Shape{2, 2, 3, 3}); + auto B = make_shared(element::f32, Shape{2, 2, 3, 3}); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 3})); } TEST(type_prop, matmul_2D) { - auto A = make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::f32, Shape{3, 6}); + auto B = make_shared(element::f32, Shape{6, 4}); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_5D_x_3D_transpose_a_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 1, 6, 3}); - auto B = make_shared(element::f32, Shape{7, 1, 5, 4, 6}); + auto A = make_shared(element::f32, Shape{2, 1, 6, 3}); + auto B = make_shared(element::f32, Shape{7, 1, 5, 4, 6}); - auto matmul = make_shared(A, B, true, true); + auto matmul = make_shared(A, B, true, true); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{7, 2, 5, 3, 4})); } TEST(type_prop, matmul_2D_transpose_a) { - auto A = make_shared(element::f32, Shape{6, 3}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::f32, Shape{6, 3}); + auto B = make_shared(element::f32, Shape{6, 4}); - auto matmul = make_shared(A, B, 1); + auto matmul = make_shared(A, B, 1); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_a) { - auto A = 
make_shared(element::f32, Shape{2, 2, 6, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::f32, Shape{2, 2, 6, 3}); + auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); - auto matmul = make_shared(A, B, 1); + auto matmul = make_shared(A, B, 1); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_2D_transpose_b) { - auto A = make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{4, 6}); + auto A = make_shared(element::f32, Shape{3, 6}); + auto B = make_shared(element::f32, Shape{4, 6}); - auto matmul = make_shared(A, B, 0, 1); + auto matmul = make_shared(A, B, 0, 1); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 4, 6}); + auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::f32, Shape{2, 2, 4, 6}); - auto matmul = make_shared(A, B, 0, 1); + auto matmul = make_shared(A, B, 0, 1); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); @@ -103,10 +108,10 @@ TEST(type_prop, matmul_4D_transpose_b) { TEST(type_prop, matmul_dynamic_5D_transpose_b) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); - auto B = make_shared(element::f32, PartialShape{1, dynamic, dynamic, 4, 6}); + auto A = make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); + auto B = make_shared(element::f32, PartialShape{1, dynamic, dynamic, 4, 6}); - auto matmul = make_shared(A, B, 0, 1); + auto matmul = make_shared(A, B, 0, 1); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(-1), 4, dynamic, dynamic, 4})); @@ -114,10 +119,10 @@ TEST(type_prop, matmul_dynamic_5D_transpose_b) { TEST(type_prop, matmul_dynamic_2D_transpose_a) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic, 3}); - auto B = make_shared(element::f32, PartialShape{4, dynamic}); + auto A = make_shared(element::f32, PartialShape{dynamic, 3}); + auto B = make_shared(element::f32, PartialShape{4, dynamic}); - auto matmul = make_shared(A, B, 1, 0); + auto matmul = make_shared(A, B, 1, 0); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{3, dynamic})); @@ -125,10 +130,10 @@ TEST(type_prop, matmul_dynamic_2D_transpose_a) { TEST(type_prop, matmul_dynamic_1D_3D) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic}); - auto B = make_shared(element::f32, PartialShape{2, 4, dynamic}); + auto A = make_shared(element::f32, PartialShape{dynamic}); + auto B = make_shared(element::f32, PartialShape{2, 4, dynamic}); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{2, dynamic})); @@ -137,54 +142,54 @@ TEST(type_prop, matmul_dynamic_1D_3D) { // Transpose attributes are ignored for 1D // 1D x 1D TEST(type_prop, matmul_1D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::f32, Shape{1}); + 
auto B = make_shared(element::f32, Shape{1}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::f32, Shape{1}); + auto B = make_shared(element::f32, Shape{1}); - auto matmul = make_shared(A, B, false, true); + auto matmul = make_shared(A, B, false, true); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::f32, Shape{1}); + auto B = make_shared(element::f32, Shape{1}); - auto matmul = make_shared(A, B, true, false); + auto matmul = make_shared(A, B, true, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::f32, Shape{1}); + auto B = make_shared(element::f32, Shape{1}); - auto matmul = make_shared(A, B, true, true); + auto matmul = make_shared(A, B, true, true); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_incompatible) { - auto A = make_shared(element::f32, Shape{3}); - auto B = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::f32, Shape{3}); + auto B = make_shared(element::f32, Shape{4}); try { - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); // Should have thrown, so fail if it didn't FAIL() << "Incompatible matrix dimensions not detected. "; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul matrix dimension")); } catch (...) 
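// Sketch of the MatMul checks above with the lost template arguments restored; ov::op::v0::MatMul
// and ov::op::v0::Parameter are assumed from the PR's direction. It exercises the same
// transpose_a shape inference the surrounding tests assert.
#include <gtest/gtest.h>
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_sketch, matmul_transpose_a_shape_inference) {
    using namespace ov;
    auto A = std::make_shared<op::v0::Parameter>(element::f32, Shape{6, 3});
    auto B = std::make_shared<op::v0::Parameter>(element::f32, Shape{6, 4});
    // transpose_a=true: A is read as [3, 6], so the product shape is [3, 4].
    auto matmul = std::make_shared<op::v0::MatMul>(A, B, /*transpose_a=*/true, /*transpose_b=*/false);
    EXPECT_EQ(matmul->get_element_type(), element::f32);
    EXPECT_EQ(matmul->get_shape(), (Shape{3, 4}));
}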
{ FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -193,34 +198,34 @@ TEST(type_prop, matmul_1D_x_1D_incompatible) { // 2D x 1D TEST(type_prop, matmul_2D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::f32, Shape{1, 2}); + auto B = make_shared(element::f32, Shape{2}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::f32, Shape{1, 2}); + auto B = make_shared(element::f32, Shape{2}); - auto matmul = make_shared(A, B, false, true); + auto matmul = make_shared(A, B, false, true); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::f32, Shape{1, 2}); + auto B = make_shared(element::f32, Shape{2}); try { - auto matmul = make_shared(A, B, true, false); + auto matmul = make_shared(A, B, true, false); // Should have thrown, so fail if it didn't FAIL() << "Incompatible matrix dimensions not detected. "; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul matrix dimension")); } catch (...) { FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -228,14 +233,14 @@ TEST(type_prop, matmul_2D_x_1D_true_false) { } TEST(type_prop, matmul_2D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::f32, Shape{1, 2}); + auto B = make_shared(element::f32, Shape{2}); try { - auto matmul = make_shared(A, B, true, true); + auto matmul = make_shared(A, B, true, true); // Should have thrown, so fail if it didn't FAIL() << "Incompatible matrix dimensions not detected. "; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul matrix dimension")); } catch (...) { FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -244,24 +249,24 @@ TEST(type_prop, matmul_2D_x_1D_true_true) { // 1D x 2D TEST(type_prop, matmul_1D_x_2D_false_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::f32, Shape{2}); + auto B = make_shared(element::f32, Shape{2, 1}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_false_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::f32, Shape{2}); + auto B = make_shared(element::f32, Shape{2, 1}); try { - auto matmul = make_shared(A, B, false, true); + auto matmul = make_shared(A, B, false, true); // Should have thrown, so fail if it didn't FAIL() << "Incompatible matrix dimensions not detected. 
"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul matrix dimension")); } catch (...) { FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -269,23 +274,23 @@ TEST(type_prop, matmul_1D_x_2D_false_true) { } TEST(type_prop, matmul_1D_x_2D_true_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); - auto matmul = make_shared(A, B, true, false); + auto A = make_shared(element::f32, Shape{2}); + auto B = make_shared(element::f32, Shape{2, 1}); + auto matmul = make_shared(A, B, true, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_true_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::f32, Shape{2}); + auto B = make_shared(element::f32, Shape{2, 1}); try { - auto matmul = make_shared(A, B, true, true); + auto matmul = make_shared(A, B, true, true); // Should have thrown, so fail if it didn't FAIL() << "Incompatible matrix dimensions not detected. "; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul matrix dimension")); } catch (...) { FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -294,10 +299,10 @@ TEST(type_prop, matmul_1D_x_2D_true_true) { // 1D x 4D TEST(type_prop, matmul_1D_x_4D_false_false) { - auto A = make_shared(element::f32, Shape{3}); - auto B = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto A = make_shared(element::f32, Shape{3}); + auto B = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 4})); @@ -305,10 +310,10 @@ TEST(type_prop, matmul_1D_x_4D_false_false) { // 4D x 1D TEST(type_prop, matmul_4D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto B = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto B = make_shared(element::f32, Shape{4}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 3})); @@ -316,44 +321,44 @@ TEST(type_prop, matmul_4D_x_1D_false_false) { // Batch broadcast TEST(type_prop, matmul_batch_broadcast) { - auto A = make_shared(element::f32, Shape{5, 1, 1, 4, 3}); - auto B = make_shared(element::f32, Shape{1, 1, 6, 3, 2}); + auto A = make_shared(element::f32, Shape{5, 1, 1, 4, 3}); + auto B = make_shared(element::f32, Shape{1, 1, 6, 3, 2}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{5, 1, 6, 4, 2})); } TEST(type_prop, matmul_batch_broadcast_expand_to_A) { - auto A = make_shared(element::f32, Shape{1, 4, 3}); - auto B = make_shared(element::f32, Shape{7, 8, 5, 3, 2}); + auto A = make_shared(element::f32, Shape{1, 4, 3}); + auto B = make_shared(element::f32, Shape{7, 8, 5, 3, 2}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); 
ASSERT_EQ(matmul->get_shape(), (Shape{7, 8, 5, 4, 2})); } TEST(type_prop, matmul_batch_broadcast_expand_to_B) { - auto A = make_shared(element::f32, Shape{8, 7, 6, 1, 4, 3}); - auto B = make_shared(element::f32, Shape{1, 5, 3, 2}); + auto A = make_shared(element::f32, Shape{8, 7, 6, 1, 4, 3}); + auto B = make_shared(element::f32, Shape{1, 5, 3, 2}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_shape(), (Shape{8, 7, 6, 5, 4, 2})); } TEST(type_prop, matmul_incompatible_batch_dims) { - auto A = make_shared(element::f32, Shape{7, 4, 3}); - auto B = make_shared(element::f32, Shape{6, 3, 2}); + auto A = make_shared(element::f32, Shape{7, 4, 3}); + auto B = make_shared(element::f32, Shape{6, 3, 2}); try { - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); // Should have thrown, so fail if it didn't FAIL() << "Incompatible batch dimensions not detected. "; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Incompatible MatMul batch dimension")); } catch (...) { FAIL() << "MatMul shape validation failed for unexpected reason"; @@ -361,10 +366,10 @@ TEST(type_prop, matmul_incompatible_batch_dims) { } TEST(type_prop, matmul_matrix_dynamic_bounds) { - auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(6, 10)}); - auto B = make_shared(element::f32, PartialShape{Dimension(7, 8), Dimension(15, 20)}); + auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(6, 10)}); + auto B = make_shared(element::f32, PartialShape{Dimension(7, 8), Dimension(15, 20)}); - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(2, 5), Dimension(15, 20)})); @@ -439,37 +444,37 @@ TEST(type_prop, matmul_batch_dynamic_bounds) { 5, // 18 4}; // 19 - auto A = make_shared(element::f32, A_shape); - auto B = make_shared(element::f32, B_shape); + auto A = make_shared(element::f32, A_shape); + auto B = make_shared(element::f32, B_shape); - auto matmul = make_shared(A, B); + auto matmul = make_shared(A, B); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); } TEST(type_prop, matmul_incompatible_matrix_dim_bounds) { - auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(3, 4)}); - auto B = make_shared(element::f32, PartialShape{Dimension(1, 2), Dimension(15, 20)}); + auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(3, 4)}); + auto B = make_shared(element::f32, PartialShape{Dimension(1, 2), Dimension(15, 20)}); auto expected_output_shape = PartialShape{Dimension(2, 5), Dimension(15, 20)}; // No error for backward compatibility - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); } TEST(type_prop, matmul_incompatible_batch_dim_bounds) { - auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), 4, 3}); - auto B = make_shared(element::f32, PartialShape{Dimension(6, 10), 3, 2}); + auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), 4, 3}); + auto B = make_shared(element::f32, PartialShape{Dimension(6, 10), 3, 
2}); Dimension dynamic = Dimension::dynamic(); auto expected_output_shape = PartialShape{dynamic, 4, 2}; // No error for backward compatibility - auto matmul = make_shared(A, B, false, false); + auto matmul = make_shared(A, B, false, false); ASSERT_EQ(matmul->get_element_type(), element::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); @@ -485,9 +490,9 @@ TEST(type_prop, matmul_propagate_labels) { set_shape_labels(a_shape, a_labels); set_shape_labels(b_shape, b_labels); - const auto a = make_shared(element::f32, a_shape); - const auto b = make_shared(element::f32, b_shape); - const auto matmul = make_shared(a, b, false, false); + const auto a = make_shared(element::f32, a_shape); + const auto b = make_shared(element::f32, b_shape); + const auto matmul = make_shared(a, b, false, false); const auto& output_shape = matmul->get_output_partial_shape(0); const auto labels = get_shape_labels(output_shape); @@ -511,9 +516,9 @@ TEST(type_prop, matmul_propagate_labels_on_interval_dims) { set_shape_labels(a_shape, a_labels); set_shape_labels(b_shape, b_labels); - const auto a = make_shared(element::f32, a_shape); - const auto b = make_shared(element::f32, b_shape); - const auto matmul = make_shared(a, b, false, false); + const auto a = make_shared(element::f32, a_shape); + const auto b = make_shared(element::f32, b_shape); + const auto matmul = make_shared(a, b, false, false); const auto& output_shape = matmul->get_output_partial_shape(0); const auto labels = get_shape_labels(output_shape); @@ -535,19 +540,19 @@ TEST(type_prop, matmul_propagate_label_on_b_input_after_reshape) { const auto a_shape = PartialShape{Dimension::dynamic(), 5, 3}; const auto b_shape = PartialShape{3, marked_dim, 2}; - const auto b = make_shared(element::f32, b_shape); - const auto shape_of_b = std::make_shared(b); + const auto b = make_shared(element::f32, b_shape); + const auto shape_of_b = std::make_shared(b); const auto gather = std::make_shared( shape_of_b, - std::make_shared(element::i64, Shape{2}, std::vector{1, 0}), - std::make_shared(element::i64, Shape{}, 0)); - const auto concat = - std::make_shared(OutputVector{gather, std::make_shared(element::i64, Shape{1}, 8)}, - 0); + std::make_shared(element::i64, Shape{2}, std::vector{1, 0}), + std::make_shared(element::i64, Shape{}, 0)); + const auto concat = std::make_shared( + OutputVector{gather, std::make_shared(element::i64, Shape{1}, 8)}, + 0); const auto reshape_b = make_shared(b, concat, false); - const auto a = make_shared(element::f32, a_shape); - const auto matmul = make_shared(a, reshape_b, false, false); + const auto a = make_shared(element::f32, a_shape); + const auto matmul = make_shared(a, reshape_b, false, false); const auto& output_shape = matmul->get_output_partial_shape(0); const auto labels = get_shape_labels(output_shape); diff --git a/src/core/tests/type_prop/max_pool.cpp b/src/core/tests/type_prop/max_pool.cpp index ca097ef0cb653e..4e772323f1179d 100644 --- a/src/core/tests/type_prop/max_pool.cpp +++ b/src/core/tests/type_prop/max_pool.cpp @@ -2,13 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/max_pool.hpp" + #include "common_test_utils/type_prop.hpp" #include "dimension_util.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/parameter.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, max_pool_default_ctor) { @@ -19,7 +20,7 @@ TEST(type_prop, max_pool_default_ctor) { const Shape pads_end{2}; const 
Shape kernel_shape{2}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(); mp->set_argument(0, arg); @@ -47,7 +48,7 @@ TEST(type_prop, max_pool_valid_auto_padding) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::VALID; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, {9, 31}})); EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label)); @@ -64,7 +65,7 @@ TEST(type_prop, max_pool_1D_auto_padding) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32})); @@ -81,7 +82,7 @@ TEST(type_prop, max_pool_2D_auto_padding) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, 32})); @@ -98,7 +99,7 @@ TEST(type_prop, max_pool_auto_padding_1D_nc_dims_dynamic_same_lower) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 32, 32})); @@ -115,7 +116,7 @@ TEST(type_prop, max_pool_auto_padding_2D_nc_dims_dynamic_same_lower) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32})); @@ -133,7 +134,7 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32})); @@ -152,7 +153,7 @@ TEST(type_prop, max_pool_auto_padding_interval_dims_same_upper) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({{1, 2}, {2, 3}, -1, -1})); @@ -170,7 +171,7 @@ 
TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, Dimension::dynamic()})); @@ -185,7 +186,7 @@ TEST(type_prop, max_pool_default_values) { const Shape pads_end{0, 0}; const Shape kernel_shape{2, 2}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape); EXPECT_EQ(mp->get_rounding_type(), op::RoundingType::FLOOR); @@ -200,7 +201,7 @@ TEST(type_prop, max_pool_v8_3D_no_dilations) { const Shape pads_end{0}; const Shape kernel_shape{3}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({1, 7, 11}); @@ -216,7 +217,7 @@ TEST(type_prop, max_pool_v8_3D_with_dilations) { const Shape pads_end{0}; const Shape kernel_shape{3}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({1, 7, 9}); @@ -232,7 +233,7 @@ TEST(type_prop, max_pool_v8_3D_with_dilations_and_padding) { const Shape pads_end{2}; const Shape kernel_shape{3}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({1, 7, 12}); @@ -248,7 +249,7 @@ TEST(type_prop, max_pool_v8_4D_no_dilations) { const Shape pads_end{0, 0}; const Shape kernel_shape{2, 2}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({1, 3, 12, 12}); @@ -264,7 +265,7 @@ TEST(type_prop, max_pool_v8_4D_with_dilations) { const Shape pads_end{0, 0}; const Shape kernel_shape{2, 2}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({1, 3, 11, 10}); @@ -281,7 +282,7 @@ TEST(type_prop, max_pool_v8_4D_dynamic_dims_with_non_zero_low_range_floor_mode) const Shape kernel_shape{2, 2}; const auto rounding_mode = op::RoundingType::FLOOR; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape, rounding_mode); @@ -300,7 +301,7 @@ TEST(type_prop, max_pool_v8_4D_dynamic_dims_with_non_zero_low_range_ceil_mode) { const Shape kernel_shape{2, 2}; const auto rounding_mode = op::RoundingType::CEIL; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, 
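// Hedged sketch of the v8 MaxPool pattern exercised above, with explicit ov::op type names
// (the angle-bracket arguments did not survive this rendering of the diff). The 13-element
// spatial input is an assumption consistent with the {1, 7, 9} result asserted above; with
// FLOOR rounding the output size is floor((in + pads - ((kernel - 1) * dilation + 1)) / stride) + 1.
#include <gtest/gtest.h>
#include "openvino/op/max_pool.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_sketch, max_pool_v8_dilations_shape_inference) {
    using namespace ov;
    const PartialShape arg_shape{1, 7, 13};
    const Strides strides{1};
    const Strides dilations{2};
    const Shape pads_begin{0};
    const Shape pads_end{0};
    const Shape kernel_shape{3};

    auto arg = std::make_shared<op::v0::Parameter>(element::f32, arg_shape);
    auto mp = std::make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);

    // Effective kernel = (3 - 1) * 2 + 1 = 5, so 13 -> floor((13 - 5) / 1) + 1 = 9.
    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 7, 9}));
    // v8 MaxPool also returns the selected indices as a second output.
    EXPECT_EQ(mp->get_output_size(), 2);
}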
kernel_shape, rounding_mode); @@ -319,7 +320,7 @@ TEST(type_prop, max_pool_v8_4D_interval_dims_with_dilations) { const Shape pads_end{0, 0}; const Shape kernel_shape{2, 2}; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, pads_begin, pads_end, kernel_shape); const auto expected_output_shape = PartialShape({{2, 3}, {1, 3}, {1, 11}, {3, 10}}); @@ -338,7 +339,7 @@ TEST(type_prop, max_pool_v8_4D_with_dilations_and_auto_pad_same_upper) { const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - const auto arg = make_shared(element::f32, arg_shape); + const auto arg = make_shared(element::f32, arg_shape); const auto mp = make_shared(arg, strides, dilations, diff --git a/src/core/tests/type_prop/mish.cpp b/src/core/tests/type_prop/mish.cpp index e6c7f2c33fdccd..73fcc9e2db254d 100644 --- a/src/core/tests/type_prop/mish.cpp +++ b/src/core/tests/type_prop/mish.cpp @@ -2,33 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/mish.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, mish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto mish_func = make_shared(data); EXPECT_EQ(mish_func->get_element_type(), element::f32); EXPECT_EQ(mish_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, mish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto mish_func = make_shared(data); EXPECT_EQ(mish_func->get_element_type(), element::f32); ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown - auto mish_partial = make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto mish_partial = + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(mish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, mish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto mish_func = make_shared(data); EXPECT_EQ(mish_func->get_element_type(), element::f32); ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); @@ -36,16 +37,16 @@ TEST(type_prop, mish_partial_static_rank) { } TEST(type_prop, mish_incompatible_dtype_i32) { - auto data = make_shared(element::i32, Shape{1, 3, 6}); - ASSERT_THROW(const auto unused = std::make_shared(data), ngraph::NodeValidationFailure); + auto data = make_shared(element::i32, Shape{1, 3, 6}); + ASSERT_THROW(const auto unused = std::make_shared(data), ov::NodeValidationFailure); } TEST(type_prop, mish_incompatible_dtype_u32) { - auto data = make_shared(element::u32, Shape{1, 3, 6}); - ASSERT_THROW(const auto unused = std::make_shared(data), ngraph::NodeValidationFailure); + auto data = make_shared(element::u32, Shape{1, 3, 6}); + ASSERT_THROW(const auto unused = std::make_shared(data), ov::NodeValidationFailure); } TEST(type_prop, mish_incompatible_dtype_boolean) { - auto data = make_shared(element::boolean, Shape{1, 3, 6}); - ASSERT_THROW(const 
auto unused = std::make_shared(data), ngraph::NodeValidationFailure); + auto data = make_shared(element::boolean, Shape{1, 3, 6}); + ASSERT_THROW(const auto unused = std::make_shared(data), ov::NodeValidationFailure); } diff --git a/src/core/tests/type_prop/multiclass_nms.cpp b/src/core/tests/type_prop/multiclass_nms.cpp index 443c20a35fc6d7..ab47c59ef2ba14 100644 --- a/src/core/tests/type_prop/multiclass_nms.cpp +++ b/src/core/tests/type_prop/multiclass_nms.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" using namespace std; using namespace ov; diff --git a/src/core/tests/type_prop/mvn.cpp b/src/core/tests/type_prop/mvn.cpp index c6b9d59fd253cb..b912fa5a329bc7 100644 --- a/src/core/tests/type_prop/mvn.cpp +++ b/src/core/tests/type_prop/mvn.cpp @@ -2,34 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/mvn.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; // ------------------------------ V0 ------------------------------ TEST(type_prop, mvn) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto mvn_func = make_shared(data); + auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto mvn_func = make_shared(data); EXPECT_EQ(mvn_func->get_element_type(), element::f32); EXPECT_EQ(mvn_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, mvn_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); - auto mvn_func = make_shared(data); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto mvn_func = make_shared(data); EXPECT_EQ(mvn_func->get_element_type(), element::f32); EXPECT_EQ(mvn_func->get_reduction_axes(), (AxisSet{1, 2})); ASSERT_TRUE(mvn_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); // across_channels = false - EXPECT_EQ(make_shared(data, false)->get_reduction_axes(), (AxisSet{2})); + EXPECT_EQ(make_shared(data, false)->get_reduction_axes(), (AxisSet{2})); // rank unknown - auto mvn_partial = make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto mvn_partial = + make_shared(make_shared(element::f32, PartialShape::dynamic())); EXPECT_EQ(mvn_partial->get_reduction_axes(), AxisSet{}); ASSERT_TRUE(mvn_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } @@ -37,8 +38,8 @@ TEST(type_prop, mvn_partial) { // ------------------------------ V6 ------------------------------ TEST(type_prop, mvn_6) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes = make_shared(element::i64, Shape{3}); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto axes = make_shared(element::i64, Shape{3}); auto mvn_func = make_shared(data, axes, true, 1e-6f, op::MVNEpsMode::INSIDE_SQRT); EXPECT_EQ(mvn_func->get_element_type(), element::f32); @@ -46,17 +47,18 @@ TEST(type_prop, mvn_6) { } TEST(type_prop, mvn_6_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 5, 6}); - auto axes = make_shared(element::i64, Shape{3}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 5, 6}); + auto axes = make_shared(element::i64, Shape{3}); auto mvn_func = make_shared(data, axes, true, 1e-6f, op::MVNEpsMode::INSIDE_SQRT); EXPECT_EQ(mvn_func->get_element_type(), element::f32); 
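// Illustrative sketch of the two MVN generations covered by mvn.cpp above, with the stripped
// template arguments restored; ov::op::v0::MVN and ov::op::v6::MVN are assumed from the new API:
#include <gtest/gtest.h>
#include "openvino/op/constant.hpp"
#include "openvino/op/mvn.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_sketch, mvn_v0_and_v6_shape_inference) {
    using namespace ov;
    auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3, 4});

    // v0: reduction axes are implied by across_channels, not passed as an input.
    auto mvn_v0 = std::make_shared<op::v0::MVN>(data);
    EXPECT_EQ(mvn_v0->get_shape(), (Shape{1, 2, 3, 4}));

    // v6: axes arrive as a second input and the epsilon mode is part of the signature.
    auto axes = op::v0::Constant::create(element::i64, Shape{3}, {1, 2, 3});
    auto mvn_v6 = std::make_shared<op::v6::MVN>(data, axes, true, 1e-6f, op::MVNEpsMode::INSIDE_SQRT);
    EXPECT_EQ(mvn_v6->get_element_type(), element::f32);
    EXPECT_EQ(mvn_v6->get_shape(), (Shape{1, 2, 3, 4}));
}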
ASSERT_TRUE(mvn_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 5, 6}))); // rank unknown - auto mvn_partial = make_shared(make_shared(element::f32, PartialShape::dynamic()), - axes, - true, - 1e-6f, - op::MVNEpsMode::INSIDE_SQRT); + auto mvn_partial = + make_shared(make_shared(element::f32, PartialShape::dynamic()), + axes, + true, + 1e-6f, + op::MVNEpsMode::INSIDE_SQRT); ASSERT_TRUE(mvn_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } diff --git a/src/core/tests/type_prop/negative.cpp b/src/core/tests/type_prop/negative.cpp index 9465b522c82104..ea761d1ad2b2f5 100644 --- a/src/core/tests/type_prop/negative.cpp +++ b/src/core/tests/type_prop/negative.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/negative.hpp" + #include "unary_ops.hpp" -using Type = ::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_negative, UnaryOperator, Type); diff --git a/src/core/tests/type_prop/non_max_suppression.cpp b/src/core/tests/type_prop/non_max_suppression.cpp index e43ed4e9b83f8c..140a03d2f0b7e1 100644 --- a/src/core/tests/type_prop/non_max_suppression.cpp +++ b/src/core/tests/type_prop/non_max_suppression.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/op/constant.hpp" using namespace std; diff --git a/src/core/tests/type_prop/non_zero.cpp b/src/core/tests/type_prop/non_zero.cpp index 6180c1d1d97194..c3966b785933ad 100644 --- a/src/core/tests/type_prop/non_zero.cpp +++ b/src/core/tests/type_prop/non_zero.cpp @@ -2,36 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/non_zero.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, non_zero) { - auto data = make_shared(element::f32, Shape{3, 3, 224, 224}); + auto data = make_shared(element::f32, Shape{3, 3, 224, 224}); auto non_zero = make_shared(data); EXPECT_EQ(non_zero->get_element_type(), element::i64); ASSERT_EQ(non_zero->get_output_partial_shape(0), (PartialShape{4, {0, 451584}})); } TEST(type_prop, non_zero_partial_input) { - auto data = make_shared(element::f32, PartialShape{{3, 4}, {5, 6}, {7, 8}}); + auto data = make_shared(element::f32, PartialShape{{3, 4}, {5, 6}, {7, 8}}); auto non_zero = make_shared(data); EXPECT_EQ(non_zero->get_element_type(), element::i64); ASSERT_EQ(non_zero->get_output_partial_shape(0), (PartialShape{3, {0, 192}})); } TEST(type_prop, non_zero_partial_with_negative) { - auto data = make_shared(element::f32, PartialShape{{3, 4}, {5, 6}, -1}); + auto data = make_shared(element::f32, PartialShape{{3, 4}, {5, 6}, -1}); auto non_zero = make_shared(data); EXPECT_EQ(non_zero->get_element_type(), element::i64); ASSERT_EQ(non_zero->get_output_partial_shape(0), (PartialShape{3, -1})); } TEST(type_prop, non_zero_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::f32, PartialShape::dynamic()); auto non_zero = make_shared(data); EXPECT_EQ(non_zero->get_element_type(), element::i64); EXPECT_TRUE( @@ -39,7 +39,7 @@ TEST(type_prop, non_zero_dynamic) { } TEST(type_prop, non_zero_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); auto non_zero = make_shared(data, element::i32); 
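// Sketch of the NonZero checks above with explicit types; ov::op::v3::NonZero is assumed since
// that is the only NonZero opset version. It shows the default i64 index type, the bounded
// output shape, and the explicitly requested i32 index type:
#include <gtest/gtest.h>
#include "openvino/op/non_zero.hpp"
#include "openvino/op/parameter.hpp"

TEST(type_prop_sketch, non_zero_output_type_and_bounds) {
    using namespace ov;
    auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{3, 3, 224, 224});

    // Default index element type is i64; the output is [rank, {0, total element count}].
    auto non_zero_i64 = std::make_shared<op::v3::NonZero>(data);
    EXPECT_EQ(non_zero_i64->get_element_type(), element::i64);
    EXPECT_EQ(non_zero_i64->get_output_partial_shape(0), (PartialShape{4, {0, 451584}}));

    // The index type can be narrowed to i32 via the second constructor argument.
    auto non_zero_i32 = std::make_shared<op::v3::NonZero>(data, element::i32);
    EXPECT_EQ(non_zero_i32->get_output_element_type(0), element::i32);
}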
ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); @@ -47,7 +47,7 @@ TEST(type_prop, non_zero_output_type) { } TEST(type_prop, non_zero_string_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); auto non_zero = make_shared(data, "i32"); ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); @@ -55,7 +55,7 @@ TEST(type_prop, non_zero_string_output_type) { } TEST(type_prop, non_zero_bool_input_type) { - auto data = make_shared(element::boolean, Shape{1, 2, 3, 4}); + auto data = make_shared(element::boolean, Shape{1, 2, 3, 4}); auto non_zero = make_shared(data, element::i32); ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); @@ -64,9 +64,9 @@ TEST(type_prop, non_zero_bool_input_type) { TEST(type_prop, non_zero_fail_index_element_type) { // Deduce type - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); try { - auto non_zero = make_shared(data, element::i16); + auto non_zero = make_shared(data, element::i16); // Should have thrown, so fail if it didn't FAIL() << "Invalid output type not detected"; diff --git a/src/core/tests/type_prop/normalize_l2.cpp b/src/core/tests/type_prop/normalize_l2.cpp index 374253f25229ee..34cf9d85704b50 100644 --- a/src/core/tests/type_prop/normalize_l2.cpp +++ b/src/core/tests/type_prop/normalize_l2.cpp @@ -2,17 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/normalize_l2.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, normalize_l2) { PartialShape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i32, Shape{2}, vector{1, 2}); + auto data = make_shared(element::f32, data_shape); + const auto axes = make_shared(element::i32, Shape{2}, vector{1, 2}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; auto normalize = make_shared(data, axes, eps, eps_mode); @@ -23,8 +24,8 @@ TEST(type_prop, normalize_l2) { TEST(type_prop, normalize_l2_dynamic) { PartialShape data_shape{2, Dimension::dynamic(), 3, Dimension(4, 6)}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i32, Shape{2}, vector{1, 2}); + auto data = make_shared(element::f32, data_shape); + const auto axes = make_shared(element::i32, Shape{2}, vector{1, 2}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; auto normalize = make_shared(data, axes, eps, eps_mode); @@ -35,8 +36,8 @@ TEST(type_prop, normalize_l2_dynamic) { TEST(type_prop, normalize_l2_axes_input_not_constant) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - auto axes = make_shared(element::u64, Shape{1}); + auto data = make_shared(element::f32, data_shape); + auto axes = make_shared(element::u64, Shape{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; ASSERT_NO_THROW(auto op = make_shared(data, axes, eps, eps_mode)); @@ -44,8 +45,8 @@ TEST(type_prop, normalize_l2_axes_input_not_constant) { TEST(type_prop, normalize_l2_invalid_axes_rank) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1, 2}, vector{1, 2}); + auto data = make_shared(element::f32, data_shape); + const auto axes = make_shared(element::i64, Shape{1, 2}, vector{1, 
2}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -62,8 +63,8 @@ TEST(type_prop, normalize_l2_invalid_axes_rank) { TEST(type_prop, normalize_l2_axes_out_of_bounds) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::f32, data_shape); + const auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -80,8 +81,8 @@ TEST(type_prop, normalize_l2_axes_out_of_bounds) { TEST(type_prop, normalize_l2_negative_axes) { PartialShape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i32, Shape{1}, vector{-1}); + auto data = make_shared(element::f32, data_shape); + const auto axes = make_shared(element::i32, Shape{1}, vector{-1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; auto normalize = make_shared(data, axes, eps, eps_mode); diff --git a/src/core/tests/type_prop/one_hot.cpp b/src/core/tests/type_prop/one_hot.cpp index e0e45c6cbfbba6..9b222843ac3eef 100644 --- a/src/core/tests/type_prop/one_hot.cpp +++ b/src/core/tests/type_prop/one_hot.cpp @@ -2,43 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/one_hot.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, one_hot_v1_output_shape) { - auto indices = make_shared(element::i64, Shape{3}); - auto depth = op::Constant::create(element::i64, Shape{}, {2}); - auto on_value = op::Constant::create(element::u32, Shape{}, {5}); - auto off_value = op::Constant::create(element::u32, Shape{}, {10}); + auto indices = make_shared(element::i64, Shape{3}); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {2}); + auto on_value = ov::op::v0::Constant::create(element::u32, Shape{}, {5}); + auto off_value = ov::op::v0::Constant::create(element::u32, Shape{}, {10}); int64_t axis = -1; auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::u32); ASSERT_EQ(ont_hot->get_shape(), (Shape{3, 2})); - auto dyn_indices = make_shared(element::i64, PartialShape{{1, 3}}); + auto dyn_indices = make_shared(element::i64, PartialShape{{1, 3}}); auto dyn_ont_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); ASSERT_EQ(dyn_ont_hot->get_output_element_type(0), element::u32); ASSERT_EQ(dyn_ont_hot->get_output_partial_shape(0), (PartialShape{{1, 3}, 2})); } TEST(type_prop, one_hot_v1_output_shape_2) { - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 3; auto ont_hot = make_shared(indices, depth, on_value, 
off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::f32); ASSERT_EQ(ont_hot->get_shape(), (Shape{1, 3, 2, 4, 3})); - auto dyn_indices = make_shared(element::i64, PartialShape{1, {3, 5}, 2, 3}); + auto dyn_indices = make_shared(element::i64, PartialShape{1, {3, 5}, 2, 3}); auto dyn_ont_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); ASSERT_EQ(dyn_ont_hot->get_output_element_type(0), element::f32); ASSERT_EQ(dyn_ont_hot->get_output_partial_shape(0), (PartialShape{1, {3, 5}, 2, 4, 3})); @@ -48,10 +51,10 @@ TEST(type_prop, one_hot_v1_indices_labels) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; set_shape_labels(ind_shape, {10, 11, 12, 13}); - auto dyn_indices = make_shared(element::i64, ind_shape); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto dyn_indices = make_shared(element::i64, ind_shape); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 1; PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; @@ -69,16 +72,16 @@ TEST(type_prop, one_hot_v1_depth_shape_of_value) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; set_shape_labels(ind_shape, {10, 11, 12, 13}); - auto dyn_indices = make_shared(element::i64, ind_shape); + auto dyn_indices = make_shared(element::i64, ind_shape); PartialShape shape_for_depth = PartialShape{4}; - auto data = make_shared(element::i8, shape_for_depth); + auto data = make_shared(element::i8, shape_for_depth); auto depth_dim = make_shared(data); auto depth = make_shared(depth_dim); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 1; PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; @@ -94,19 +97,19 @@ TEST(type_prop, one_hot_v1_depth_value_label) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; set_shape_labels(ind_shape, {10, 11, 12, 13}); - auto dyn_indices = make_shared(element::i64, ind_shape); + auto dyn_indices = make_shared(element::i64, ind_shape); auto labeled_dim = Dimension(4, 6); ov::label_t depth_label = 2345664; ov::DimensionTracker::set_label(labeled_dim, depth_label); PartialShape shape_for_depth = PartialShape{labeled_dim}; - auto data = make_shared(element::i8, shape_for_depth); + auto data = make_shared(element::i8, shape_for_depth); auto depth_dim = make_shared(data); auto depth = make_shared(depth_dim); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 1; PartialShape expected_shape{-1, {4, 6}, {3, 5}, 2, 3}; @@ -124,10 +127,10 @@ TEST(type_prop, one_hot_v1_output_labels) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; set_shape_labels(ind_shape, {10, 11, 12, 13}); - auto dyn_indices = make_shared(element::i64, ind_shape); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = 
op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto dyn_indices = make_shared(element::i64, ind_shape); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 1; PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; @@ -142,10 +145,10 @@ TEST(type_prop, one_hot_v1_output_labels) { } TEST(type_prop, one_hot_v1_default_constructor) { - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 3; auto ont_hot = make_shared(); @@ -164,16 +167,16 @@ TEST(type_prop, one_hot_v1_default_constructor) { } TEST(type_prop, one_hot_v1_indices_elem_not_integral) { - auto indices = make_shared(element::f16, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::f16, Shape{2, 2}); + auto depth = make_shared(element::i64, Shape{}); + auto on_value = make_shared(element::u32, Shape{}); + auto off_value = make_shared(element::u32, Shape{}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incorrect indices element type not detected"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Indices must be integral element type.")); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -181,16 +184,16 @@ TEST(type_prop, one_hot_v1_indices_elem_not_integral) { } TEST(type_prop, one_hot_v1_depth_elem_not_integral) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::f16, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::i64, Shape{2, 2}); + auto depth = make_shared(element::f16, Shape{}); + auto on_value = make_shared(element::u32, Shape{}); + auto off_value = make_shared(element::u32, Shape{}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incorrect depth element type not detected"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Depth must be integral element type.")); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -198,10 +201,10 @@ TEST(type_prop, one_hot_v1_depth_elem_not_integral) { } TEST(type_prop, one_hot_v1_negative_depth) { - auto indices = make_shared(element::i32, Shape{2, 2}); - auto depth = op::Constant::create(element::i64, Shape{}, {-4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::i32, Shape{2, 2}); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {-4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = -1; OV_EXPECT_THROW(auto ont_hot = make_shared(indices, depth, on_value, off_value, axis), @@ -210,16 +213,16 @@ TEST(type_prop, one_hot_v1_negative_depth) { } TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::f16, Shape{}); + auto indices = make_shared(element::i64, Shape{2, 2}); + auto depth = make_shared(element::i64, Shape{}); + auto on_value = make_shared(element::bf16, Shape{}); + auto off_value = make_shared(element::f16, Shape{}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incompatible on/off element types not detected"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("on_value element type must be compatible with off_value element type.")); } catch (...) { @@ -228,16 +231,16 @@ TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { } TEST(type_prop, one_hot_v1_depth_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{1}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::i64, Shape{2, 2}); + auto depth = make_shared(element::i64, Shape{1}); + auto on_value = make_shared(element::bf16, Shape{}); + auto off_value = make_shared(element::bf16, Shape{}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar depth input not detected."; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("depth input must be scalar.")); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -245,16 +248,16 @@ TEST(type_prop, one_hot_v1_depth_not_scalar) { } TEST(type_prop, one_hot_v1_on_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{2}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::i64, Shape{2, 2}); + auto depth = make_shared(element::i64, Shape{}); + auto on_value = make_shared(element::bf16, Shape{2}); + auto off_value = make_shared(element::bf16, Shape{}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar on_value input not detected."; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("on_value input must be scalar.")); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -262,16 +265,16 @@ TEST(type_prop, one_hot_v1_on_value_not_scalar) { } TEST(type_prop, one_hot_v1_off_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{3}); + auto indices = make_shared(element::i64, Shape{2, 2}); + auto depth = make_shared(element::i64, Shape{}); + auto on_value = make_shared(element::bf16, Shape{}); + auto off_value = make_shared(element::bf16, Shape{3}); int64_t axis = -1; try { auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar off_value input not detected."; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("off_value input must be scalar.")); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -279,31 +282,31 @@ TEST(type_prop, one_hot_v1_off_value_not_scalar) { } TEST(type_prop, one_hot_v1_out_types_1) { - auto indices = make_shared(element::i32, Shape{3, 2}); - auto depth = op::Constant::create(element::i32, Shape{}, {2}); + auto indices = make_shared(element::i32, Shape{3, 2}); + auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; - auto on_value = op::Constant::create(element::f32, Shape{}, {-3.3}); - auto off_value = op::Constant::create(element::f32, Shape{}, {-10.12}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {-3.3}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {-10.12}); auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::f32); } TEST(type_prop, one_hot_v1_out_types_2) { - auto indices = make_shared(element::i64, Shape{3, 2}); - auto depth = op::Constant::create(element::i32, Shape{}, {2}); + auto indices = make_shared(element::i64, Shape{3, 2}); + auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; - auto on_value = op::Constant::create(element::i32, Shape{}, {-1}); - auto off_value = op::Constant::create(element::i32, Shape{}, {7}); + auto on_value = ov::op::v0::Constant::create(element::i32, Shape{}, {-1}); + auto off_value = ov::op::v0::Constant::create(element::i32, Shape{}, {7}); auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::i32); } TEST(type_prop, one_hot_v1_out_types_3) { - auto indices = make_shared(element::i32, Shape{3, 2}); - auto depth = op::Constant::create(element::i32, Shape{}, {2}); + auto indices = make_shared(element::i32, Shape{3, 2}); + auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; - auto on_value = op::Constant::create(element::boolean, Shape{}, {true}); - auto off_value = op::Constant::create(element::boolean, Shape{}, {false}); + auto on_value = ov::op::v0::Constant::create(element::boolean, Shape{}, {true}); + auto off_value = ov::op::v0::Constant::create(element::boolean, Shape{}, {false}); auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::boolean); } diff --git a/src/core/tests/type_prop/pad.cpp b/src/core/tests/type_prop/pad.cpp index fe5abd7ed53935..5b6bd525bb28c8 100644 --- a/src/core/tests/type_prop/pad.cpp +++ b/src/core/tests/type_prop/pad.cpp @@ -2,15 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/pad.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; template @@ -20,9 +21,9 @@ TYPED_TEST_SUITE_P(PadTest); TYPED_TEST_P(PadTest, pad_default_ctor) { const auto arg_shape = PartialShape{{1, 2}, {4, 10}, {3, 8}, {1, 2}}; - const auto arg = make_shared(element::f32, arg_shape); - const auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 2, 1, 0}); - const auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); + const auto arg = make_shared(element::f32, arg_shape); + const auto pads_begin = 
make_shared(element::i64, Shape{4}, std::vector{0, 2, 1, 0}); + const auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); const auto pad = make_shared(); pad->set_arguments(OutputVector{arg, pads_begin, pads_end}); @@ -34,10 +35,10 @@ TYPED_TEST_P(PadTest, pad_default_ctor) { } TYPED_TEST_P(PadTest, pad_arg_pad_value_type_mismatch) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f16, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1}); + auto pads_end = make_shared(element::i64, Shape{1}); + auto arg_pad_value = make_shared(element::f16, Shape{1}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); @@ -52,10 +53,10 @@ TYPED_TEST_P(PadTest, pad_arg_pad_value_type_mismatch) { } TYPED_TEST_P(PadTest, pad_arg_pad_value_shape_not_compatible) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1}); + auto pads_end = make_shared(element::i64, Shape{1}); + auto arg_pad_value = make_shared(element::f32, Shape{1}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); @@ -70,9 +71,9 @@ TYPED_TEST_P(PadTest, pad_arg_pad_value_shape_not_compatible) { } TYPED_TEST_P(PadTest, pad_pads_begin_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1, 2}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1, 2}); + auto pads_end = make_shared(element::i64, Shape{1}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, op::PadMode::SYMMETRIC); @@ -87,9 +88,9 @@ TYPED_TEST_P(PadTest, pad_pads_begin_shape_not_1D) { } TYPED_TEST_P(PadTest, pad_pads_end_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1, 2}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1}); + auto pads_end = make_shared(element::i64, Shape{1, 2}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, op::PadMode::SYMMETRIC); @@ -104,9 +105,9 @@ TYPED_TEST_P(PadTest, pad_pads_end_shape_not_1D) { } TYPED_TEST_P(PadTest, pad_pads_begin_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{4}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{4}); + auto pads_end = make_shared(element::i64, Shape{1}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, op::PadMode::SYMMETRIC); @@ -123,10 +124,10 @@ TYPED_TEST_P(PadTest, pad_pads_begin_size_not_correct) { } TYPED_TEST_P(PadTest, pad_pads_end_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = 
make_shared(element::i64, Shape{4}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1}); + auto pads_end = make_shared(element::i64, Shape{4}); + auto arg_pad_value = make_shared(element::f32, Shape{}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); @@ -143,9 +144,9 @@ TYPED_TEST_P(PadTest, pad_pads_end_size_not_correct) { } TYPED_TEST_P(PadTest, pad_arg_pads_begin_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::f32, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::f32, Shape{1}); + auto pads_end = make_shared(element::i64, Shape{1}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, op::PadMode::REFLECT); @@ -160,9 +161,9 @@ TYPED_TEST_P(PadTest, pad_arg_pads_begin_incompatible_type) { } TYPED_TEST_P(PadTest, pad_arg_pads_end_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i64, Shape{1}); + auto pads_end = make_shared(element::f32, Shape{1}); try { auto pad = make_shared(arg, pads_begin, pads_end, op::PadMode::REFLECT); @@ -177,10 +178,10 @@ TYPED_TEST_P(PadTest, pad_arg_pads_end_incompatible_type) { } TYPED_TEST_P(PadTest, pad_deduce_too_small_for_edge) { - auto arg = make_shared(element::f32, Shape{1, 5, 0, 2}); - auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + auto arg = make_shared(element::f32, Shape{1, 5, 0, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::f32, Shape{}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::EDGE); @@ -197,10 +198,10 @@ TYPED_TEST_P(PadTest, pad_deduce_too_small_for_edge) { } TYPED_TEST_P(PadTest, pad_deduce_too_small_for_reflect) { - auto arg = make_shared(element::f32, Shape{1, 5, 1, 2}); - auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + auto arg = make_shared(element::f32, Shape{1, 5, 1, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::f32, Shape{}); try { auto pad_v1 = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::REFLECT); @@ -219,9 +220,9 @@ TYPED_TEST_P(PadTest, pad_deduce_too_small_for_reflect) { TYPED_TEST_P(PadTest, pad_pads_end_got_negative_value) { auto arg_shape = PartialShape{-1, {0, 10}, {2, -1}, {2, 8}, {3, 10}, 5}; set_shape_labels(arg_shape, 10); - const auto arg = std::make_shared(element::f32, arg_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{6}, {2, 0, 1, 3, 2, 1}); - const auto pads_end = 
op::Constant::create(element::i64, Shape{6}, {-3, -2, -2, -3, -1, -3}); + const auto arg = std::make_shared(element::f32, arg_shape); + const auto pads_begin = ov::op::v0::Constant::create(element::i64, Shape{6}, {2, 0, 1, 3, 2, 1}); + const auto pads_end = ov::op::v0::Constant::create(element::i64, Shape{6}, {-3, -2, -2, -3, -1, -3}); const auto pad = make_shared(arg, pads_begin, pads_end, op::PadMode::REFLECT); @@ -233,9 +234,9 @@ TYPED_TEST_P(PadTest, pad_pads_end_got_negative_value) { TYPED_TEST_P(PadTest, pad_pads_begin_got_negative_value) { auto arg_shape = PartialShape{-1, {0, 10}, {2, -1}, {2, 8}, {3, 10}, 5}; set_shape_labels(arg_shape, 10); - const auto arg = std::make_shared(element::f32, arg_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{6}, {-1, -1, -2, -3, -8, -4}); - const auto pads_end = op::Constant::create(element::i64, Shape{6}, {0, 2, 0, 3, 5, 4}); + const auto arg = std::make_shared(element::f32, arg_shape); + const auto pads_begin = ov::op::v0::Constant::create(element::i64, Shape{6}, {-1, -1, -2, -3, -8, -4}); + const auto pads_end = ov::op::v0::Constant::create(element::i64, Shape{6}, {0, 2, 0, 3, 5, 4}); const auto pad = make_shared(arg, pads_begin, pads_end, op::PadMode::REFLECT); EXPECT_EQ(pad->get_output_partial_shape(0), PartialShape({-1, {1, 11}, {0, -1}, {2, 8}, {0, 7}, 5})); @@ -244,20 +245,20 @@ TYPED_TEST_P(PadTest, pad_pads_begin_got_negative_value) { } TYPED_TEST_P(PadTest, pad_dynamic_output_with_dynamic_rank) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto pads_begin = make_shared(element::i32, Shape{1}); - auto pads_end = make_shared(element::i32, Shape{1}); - auto arg_pad_value = op::Constant::create(element::f32, Shape{}, {0}); + auto arg = make_shared(element::f32, PartialShape::dynamic()); + auto pads_begin = make_shared(element::i32, Shape{1}); + auto pads_end = make_shared(element::i32, Shape{1}); + auto arg_pad_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0}); auto pad = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); ASSERT_EQ(pad->get_output_partial_shape(0), PartialShape::dynamic()); } TYPED_TEST_P(PadTest, pad_dynamic_output_with_static_rank) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i32, Shape{1}); - auto pads_end = make_shared(element::i32, Shape{1}); - auto arg_pad_value = op::Constant::create(element::f32, Shape{}, {0}); + auto arg = make_shared(element::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i32, Shape{1}); + auto pads_end = make_shared(element::i32, Shape{1}); + auto arg_pad_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0}); auto pad = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); ASSERT_EQ(pad->get_output_partial_shape(0), PartialShape::dynamic(3)); @@ -266,9 +267,9 @@ TYPED_TEST_P(PadTest, pad_dynamic_output_with_static_rank) { TYPED_TEST_P(PadTest, pad_any_dim_for_padding_reflect) { auto arg_shape = PartialShape{1, {23, 48}, {23, 48}, 1}; set_shape_labels(arg_shape, 10); - auto arg = make_shared(element::f32, arg_shape); - auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); + auto arg = make_shared(element::f32, arg_shape); + auto pads_begin = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, std::vector{0, 1, 1, 0}); auto pad = 
make_shared(arg, pads_begin, pads_end, op::PadMode::REFLECT); EXPECT_EQ(pad->get_output_partial_shape(0), PartialShape({1, {25, 50}, {25, 50}, 1})); @@ -278,9 +279,11 @@ TYPED_TEST_P(PadTest, pad_any_dim_for_padding_reflect) { TYPED_TEST_P(PadTest, pad_any_dim_for_padding_edge) { auto arg_shape = PartialShape{1, {0, 48}, -1, {20, -1}, {5, -1}, 10, 12}; set_shape_labels(arg_shape, 10); - auto arg = make_shared(element::f32, arg_shape); - auto pads_begin = make_shared(element::i64, Shape{7}, std::vector{1, 2, 1, 2, 0, 0, 0}); - auto pads_end = make_shared(element::i64, Shape{7}, std::vector{0, 3, 0, 1, 0, 5, 0}); + auto arg = make_shared(element::f32, arg_shape); + auto pads_begin = + make_shared(element::i64, Shape{7}, std::vector{1, 2, 1, 2, 0, 0, 0}); + auto pads_end = + make_shared(element::i64, Shape{7}, std::vector{0, 3, 0, 1, 0, 5, 0}); auto pad = make_shared(arg, pads_begin, pads_end, op::PadMode::EDGE); EXPECT_EQ(pad->get_output_partial_shape(0), PartialShape({2, {5, 53}, {1, -1}, {23, -1}, {5, -1}, 15, 12})); @@ -289,10 +292,10 @@ TYPED_TEST_P(PadTest, pad_any_dim_for_padding_edge) { } TYPED_TEST_P(PadTest, pad_dynamic_input_type_with_static_value) { - auto arg = make_shared(element::dynamic, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i32, Shape{1}); - auto pads_end = make_shared(element::i32, Shape{1}); - auto arg_pad_value = op::Constant::create(element::f32, Shape{}, {0}); + auto arg = make_shared(element::dynamic, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::i32, Shape{1}); + auto pads_end = make_shared(element::i32, Shape{1}); + auto arg_pad_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0}); auto pad = make_shared(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT); EXPECT_EQ(pad->get_output_element_type(0), element::f32); @@ -307,9 +310,9 @@ TYPED_TEST_P(PadTest, pad_preserve_partial_values_and_labels_via_evaluates_bound set_shape_labels(begin_shape, 20); set_shape_labels(end_shape, 30); - auto arg = make_shared(element::f32, arg_shape); - auto s_begin = make_shared(make_shared(element::i64, begin_shape)); - auto s_end = make_shared(make_shared(element::i64, end_shape)); + auto arg = make_shared(element::f32, arg_shape); + auto s_begin = make_shared(make_shared(element::i64, begin_shape)); + auto s_end = make_shared(make_shared(element::i64, end_shape)); auto pad = make_shared(arg, s_begin, s_end, op::PadMode::EDGE); @@ -320,14 +323,14 @@ TYPED_TEST_P(PadTest, pad_preserve_partial_values_and_labels_via_evaluates_bound TYPED_TEST_P(PadTest, pad_preserve_partial_values_and_labels_on_inputs) { auto arg_shape = PartialShape{1, {2, 5}, {1, 3}}; set_shape_labels(arg_shape, 10); - auto arg = make_shared(element::i32, arg_shape); - auto s = make_shared(arg); + auto arg = make_shared(element::i32, arg_shape); + auto s = make_shared(arg); - auto pads_begin = make_shared(element::i64, Shape{1}, std::vector{1}); - auto pads_end = make_shared(element::i64, Shape{1}, std::vector{2}); + auto pads_begin = make_shared(element::i64, Shape{1}, std::vector{1}); + auto pads_end = make_shared(element::i64, Shape{1}, std::vector{2}); auto pad = make_shared(s, pads_begin, pads_end, op::PadMode::EDGE); - auto param = make_shared(element::f32, PartialShape{1}); + auto param = make_shared(element::f32, PartialShape{1}); auto bc = std::make_shared(param, pad, op::BroadcastType::BIDIRECTIONAL); EXPECT_EQ(bc->get_output_partial_shape(0), PartialShape({1, 1, {2, 5}, {1, 3}, {1, 3}, {1, 3}})); diff --git a/src/core/tests/type_prop/parameter.cpp 
b/src/core/tests/type_prop/parameter.cpp index fac50bd27d105b..ff80e20450d8f4 100644 --- a/src/core/tests/type_prop/parameter.cpp +++ b/src/core/tests/type_prop/parameter.cpp @@ -3,14 +3,12 @@ // #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, param_partial_rank_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); auto& pshape = a->get_output_partial_shape(0); @@ -19,7 +17,7 @@ TEST(type_prop, param_partial_rank_dynamic) { } TEST(type_prop, param_partial_rank_static) { - auto a = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3, 4}); + auto a = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3, 4}); auto& pshape = a->get_output_partial_shape(0); @@ -32,7 +30,7 @@ TEST(type_prop, param_partial_rank_static) { } TEST(type_prop, param_layout) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); a->set_layout("NHWC"); ASSERT_EQ(a->get_layout(), "NHWC"); a->set_layout(ov::Layout()); @@ -41,12 +39,12 @@ TEST(type_prop, param_layout) { } TEST(type_prop, param_layout_empty) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); ASSERT_TRUE(a->get_layout().empty()); } TEST(type_prop, param_layout_invalid) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); a->get_output_tensor(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] = "NCHW"; // incorrect way ASSERT_THROW(a->get_layout(), ov::Exception); } diff --git a/src/core/tests/type_prop/prelu.cpp b/src/core/tests/type_prop/prelu.cpp index 46a865f69e887e..febe3f93997fb1 100644 --- a/src/core/tests/type_prop/prelu.cpp +++ b/src/core/tests/type_prop/prelu.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/prelu.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, prelu) { - auto param = make_shared(element::f32, Shape{2, 4}); - auto slope = make_shared(element::f32, Shape{2}); + auto param = make_shared(element::f32, Shape{2, 4}); + auto slope = make_shared(element::f32, Shape{2}); Shape prelu_shape{2, 4}; - auto prelu = make_shared(param, slope); + auto prelu = make_shared(param, slope); ASSERT_EQ(prelu->get_element_type(), element::f32); ASSERT_EQ(prelu->get_shape(), prelu_shape); } diff --git a/src/core/tests/type_prop/prior_box.cpp b/src/core/tests/type_prop/prior_box.cpp index e971436bf7fdbd..2a88b443cb797b 100644 --- a/src/core/tests/type_prop/prior_box.cpp +++ b/src/core/tests/type_prop/prior_box.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" #include "openvino/opsets/opset11.hpp" using namespace ov; diff --git a/src/core/tests/type_prop/prior_box_clustered.cpp b/src/core/tests/type_prop/prior_box_clustered.cpp index 8efe142ae8b0a5..69531cebc01f5f 100644 --- a/src/core/tests/type_prop/prior_box_clustered.cpp +++ b/src/core/tests/type_prop/prior_box_clustered.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" #include 
"openvino/opsets/opset11.hpp" using namespace ov; diff --git a/src/core/tests/type_prop/proposal.cpp b/src/core/tests/type_prop/proposal.cpp index 84224a1b6900d5..54bceb5bf76a15 100644 --- a/src/core/tests/type_prop/proposal.cpp +++ b/src/core/tests/type_prop/proposal.cpp @@ -2,24 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/proposal.hpp" +#include "openvino/op/proposal.hpp" #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; // ------------------------------ V0 ------------------------------ TEST(type_prop, proposal_v0_invalid_class_probs_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -27,10 +25,10 @@ TEST(type_prop, proposal_v0_invalid_class_probs_rank) { } TEST(type_prop, proposal_v0_invalid_anchor_count) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -38,10 +36,10 @@ TEST(type_prop, proposal_v0_invalid_anchor_count) { } TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -49,10 +47,10 @@ TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) { } TEST(type_prop, proposal_v0_invalid_image_shape_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{2, 1}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -60,10 +58,10 @@ TEST(type_prop, 
proposal_v0_invalid_image_shape_rank) { } TEST(type_prop, proposal_v0_invalid_image_shape_size) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -71,15 +69,15 @@ TEST(type_prop, proposal_v0_invalid_image_shape_size) { } TEST(type_prop, proposal_v0_default_ctor) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 1; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f16, Shape{batch_size, 12, 34, 62}); - auto class_bbox_deltas = make_shared(element::f16, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f16, Shape{3}); + auto class_probs = make_shared(element::f16, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = make_shared(element::f16, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::f16, Shape{3}); auto op = make_shared(); op->set_arguments(OutputVector{class_probs, class_bbox_deltas, image_shape}); @@ -93,15 +91,15 @@ TEST(type_prop, proposal_v0_default_ctor) { } TEST(type_prop, proposal_v0_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 1; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::bf16, Shape{batch_size, 12, 34, 62}); - auto class_bbox_deltas = make_shared(element::bf16, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::bf16, Shape{3}); + auto class_probs = make_shared(element::bf16, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = make_shared(element::bf16, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::bf16, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_element_type(0), element::bf16); @@ -109,7 +107,7 @@ TEST(type_prop, proposal_v0_shape_infer) { } TEST(type_prop, proposal_v0_dynamic_class_probs_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; const auto batch_size = Dimension(2); @@ -118,9 +116,9 @@ TEST(type_prop, proposal_v0_dynamic_class_probs_dim1_batch_size_infer) { set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -130,7 +128,7 @@ TEST(type_prop, proposal_v0_dynamic_class_probs_dim1_batch_size_infer) { } TEST(type_prop, proposal_v0_dynamic_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; const auto 
batch_size = Dimension(2); @@ -138,9 +136,9 @@ TEST(type_prop, proposal_v0_dynamic_bbox_deltas_dim1_batch_size_infer) { auto class_bbox_shape = PartialShape{-1, 4, 3, 4}; set_shape_labels(class_props_shape, 10); - auto class_probs = make_shared(element::f64, class_props_shape); - auto class_bbox_deltas = make_shared(element::f64, class_bbox_shape); - auto image_shape = make_shared(element::f64, Shape{3}); + auto class_probs = make_shared(element::f64, class_props_shape); + auto class_bbox_deltas = make_shared(element::f64, class_bbox_shape); + auto image_shape = make_shared(element::f64, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -150,11 +148,11 @@ TEST(type_prop, proposal_v0_dynamic_bbox_deltas_dim1_batch_size_infer) { } TEST(type_prop, proposal_v0_dynamic_class_probs_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape{-1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, PartialShape{-1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, PartialShape{-1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, PartialShape{-1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -163,7 +161,7 @@ TEST(type_prop, proposal_v0_dynamic_class_probs_bbox_deltas_dim1_batch_size_infe } TEST(type_prop, proposal_v0_dynamic_range_class_probs_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 2; auto class_props_shape = PartialShape{{8, 14}, 2, 3, 4}; @@ -171,9 +169,9 @@ TEST(type_prop, proposal_v0_dynamic_range_class_probs_bbox_deltas_dim1_batch_siz set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), @@ -182,7 +180,7 @@ TEST(type_prop, proposal_v0_dynamic_range_class_probs_bbox_deltas_dim1_batch_siz } TEST(type_prop, proposal_v0_dynamic_image_shape_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 2; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; @@ -193,9 +191,9 @@ TEST(type_prop, proposal_v0_dynamic_image_shape_shape_infer) { set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, PartialShape::dynamic()); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, PartialShape::dynamic()); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -204,69 +202,69 @@ TEST(type_prop, proposal_v0_dynamic_image_shape_shape_infer) { } 
TEST(type_prop, proposal_v0_class_probs_dynamic_rank_but_batch_shape_defined_in_bbox) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 2; const auto batch_size = Dimension(7); - auto class_probs = make_shared(element::f32, PartialShape::dynamic()); - auto class_bbox_deltas = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic()); + auto class_bbox_deltas = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{batch_size * attrs.post_nms_topn, 5})); } TEST(type_prop, proposal_v0_bbox_dynamic_rank_but_batch_defined_in_class_probs) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 2; const auto batch_size = Dimension(7); - auto class_probs = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic()); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic()); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{batch_size * attrs.post_nms_topn, 5})); } TEST(type_prop, proposal_v0_everything_dynamic_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(4)); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic(4)); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 5})); } TEST(type_prop, proposal_v0_everything_dynamic_class_probs_dynamic_rank_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic()); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic()); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 5})); } TEST(type_prop, proposal_v0_everything_dynamic_class_probs_bbox_deltas_dynamic_rank_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto 
class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 5})); } TEST(type_prop, proposal_v0_invalid_class_probs_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(3)); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, PartialShape::dynamic(3)); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -274,10 +272,10 @@ TEST(type_prop, proposal_v0_invalid_class_probs_dynamic) { } TEST(type_prop, proposal_v0_invalid_bbox_deltas_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(3)); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(3)); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -285,10 +283,10 @@ TEST(type_prop, proposal_v0_invalid_bbox_deltas_dynamic) { } TEST(type_prop, proposal_v0_invalid_image_shape_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(0)); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(0)); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -296,10 +294,10 @@ TEST(type_prop, proposal_v0_invalid_image_shape_dynamic) { } TEST(type_prop, proposal_v0_invalid_class_probs_type) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::i32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::i32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -307,10 +305,10 @@ TEST(type_prop, proposal_v0_invalid_class_probs_type) { } TEST(type_prop, proposal_v0_invalid_bbox_deltas_type) { 
- op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::i32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::i32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -318,10 +316,10 @@ TEST(type_prop, proposal_v0_invalid_bbox_deltas_type) { } TEST(type_prop, proposal_v0_invalid_image_shape_type) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::i32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::i32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -331,10 +329,10 @@ TEST(type_prop, proposal_v0_invalid_image_shape_type) { // ------------------------------ V4 ------------------------------ TEST(type_prop, proposal_v4_invalid_class_probs_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -342,10 +340,10 @@ TEST(type_prop, proposal_v4_invalid_class_probs_rank) { } TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -353,10 +351,10 @@ TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) { } TEST(type_prop, proposal_v4_invalid_image_shape_rank) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{2, 1}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -364,10 +362,10 @@ TEST(type_prop, 
proposal_v4_invalid_image_shape_rank) { } TEST(type_prop, proposal_v4_invalid_image_shape_size) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -375,15 +373,15 @@ TEST(type_prop, proposal_v4_invalid_image_shape_size) { } TEST(type_prop, proposal_v4_default_ctor) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 1; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f16, Shape{batch_size, 12, 34, 62}); - auto class_bbox_deltas = make_shared(element::f16, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f16, Shape{3}); + auto class_probs = make_shared(element::f16, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = make_shared(element::f16, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::f16, Shape{3}); auto op = make_shared(); op->set_arguments(OutputVector{class_probs, class_bbox_deltas, image_shape}); @@ -399,15 +397,15 @@ TEST(type_prop, proposal_v4_default_ctor) { } TEST(type_prop, proposal_v4_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 1; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); - auto class_bbox_deltas = make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_bbox_deltas = make_shared(element::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_THAT(op->outputs(), Each(Property("Element type", &Output::get_element_type, element::f32))); @@ -416,7 +414,7 @@ TEST(type_prop, proposal_v4_shape_infer) { } TEST(type_prop, proposal_v4_dynamic_class_probs_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; const auto batch_size = Dimension(2); @@ -425,9 +423,9 @@ TEST(type_prop, proposal_v4_dynamic_class_probs_dim1_batch_size_infer) { set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f64, class_props_shape); - auto class_bbox_deltas = make_shared(element::f64, class_bbox_shape); - auto image_shape = make_shared(element::f64, Shape{3}); + auto class_probs = make_shared(element::f64, class_props_shape); + auto class_bbox_deltas = make_shared(element::f64, class_bbox_shape); + auto image_shape = make_shared(element::f64, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -440,7 +438,7 @@ TEST(type_prop, proposal_v4_dynamic_class_probs_dim1_batch_size_infer) { } TEST(type_prop, proposal_v4_dynamic_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; 
attrs.post_nms_topn = 1; const auto batch_size = Dimension(2); @@ -448,9 +446,9 @@ TEST(type_prop, proposal_v4_dynamic_bbox_deltas_dim1_batch_size_infer) { auto class_bbox_shape = PartialShape{-1, 4, {0, 3}, {1, 4}}; set_shape_labels(class_props_shape, 10); - auto class_probs = make_shared(element::bf16, class_props_shape); - auto class_bbox_deltas = make_shared(element::bf16, class_bbox_shape); - auto image_shape = make_shared(element::bf16, Shape{3}); + auto class_probs = make_shared(element::bf16, class_props_shape); + auto class_bbox_deltas = make_shared(element::bf16, class_bbox_shape); + auto image_shape = make_shared(element::bf16, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -463,16 +461,16 @@ TEST(type_prop, proposal_v4_dynamic_bbox_deltas_dim1_batch_size_infer) { } TEST(type_prop, proposal_v4_dynamic_class_probs_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; auto class_props_shape = PartialShape{-1, 2, 3, 4}; auto class_bbox_shape = PartialShape{-1, 4, 3, 4}; set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -484,7 +482,7 @@ TEST(type_prop, proposal_v4_dynamic_class_probs_bbox_deltas_dim1_batch_size_infe } TEST(type_prop, proposal_v4_dynamic_image_shape_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.base_size = 1; attrs.pre_nms_topn = 20; attrs.post_nms_topn = 200; @@ -495,9 +493,9 @@ TEST(type_prop, proposal_v4_dynamic_image_shape_shape_infer) { set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, PartialShape::dynamic()); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, PartialShape::dynamic()); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -509,11 +507,11 @@ TEST(type_prop, proposal_v4_dynamic_image_shape_shape_infer) { } TEST(type_prop, proposal_v4_everything_dynamic_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(4)); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic(4)); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -522,11 +520,11 @@ TEST(type_prop, proposal_v4_everything_dynamic_shape_infer) { } TEST(type_prop, proposal_v4_everything_dynamic_class_probs_dynamic_rank_shape_infer) { - op::ProposalAttrs attrs; 
+ op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(4)); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -535,11 +533,11 @@ TEST(type_prop, proposal_v4_everything_dynamic_class_probs_dynamic_rank_shape_in } TEST(type_prop, proposal_v4_everything_dynamic_class_probs_bbox_deltas_dynamic_rank_shape_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -548,7 +546,7 @@ TEST(type_prop, proposal_v4_everything_dynamic_class_probs_bbox_deltas_dynamic_r } TEST(type_prop, proposal_v4_dynamic_range_class_probs_bbox_deltas_dim1_batch_size_infer) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 2; auto class_props_shape = PartialShape{{8, 14}, 2, 3, 4}; @@ -556,9 +554,9 @@ TEST(type_prop, proposal_v4_dynamic_range_class_probs_bbox_deltas_dim1_batch_siz set_shape_labels(class_props_shape, 10); set_shape_labels(class_bbox_shape, 20); - auto class_probs = make_shared(element::f32, class_props_shape); - auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::f32, class_props_shape); + auto class_bbox_deltas = make_shared(element::f32, class_bbox_shape); + auto image_shape = make_shared(element::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); @@ -572,13 +570,13 @@ TEST(type_prop, proposal_v4_dynamic_range_class_probs_bbox_deltas_dim1_batch_siz } TEST(type_prop, proposal_v4_class_dynamic_rank_but_batch_shape_defined_in_bbox) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; const auto batch_size = Dimension(7); - auto class_probs = make_shared(element::f32, PartialShape::dynamic()); - auto class_bbox_deltas = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape::dynamic()); + auto class_bbox_deltas = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{batch_size * attrs.post_nms_topn, 5})); @@ -586,13 +584,13 @@ TEST(type_prop, 
proposal_v4_class_dynamic_rank_but_batch_shape_defined_in_bbox) } TEST(type_prop, proposal_v4_bbox_dynamic_rank_but_batch_defined_in_class_probs) { - op::ProposalAttrs attrs; + op::v0::Proposal::Attributes attrs; attrs.post_nms_topn = 1; const auto batch_size = Dimension(10); - auto class_probs = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic()); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); + auto class_probs = make_shared(element::f32, PartialShape{batch_size, 24, 32, 32}); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic()); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(1)); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{batch_size * attrs.post_nms_topn, 5})); @@ -600,10 +598,10 @@ TEST(type_prop, proposal_v4_bbox_dynamic_rank_but_batch_defined_in_class_probs) } TEST(type_prop, proposal_v4_invalid_class_probs_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, PartialShape::dynamic(3)); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, PartialShape::dynamic(3)); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -611,10 +609,10 @@ TEST(type_prop, proposal_v4_invalid_class_probs_dynamic) { } TEST(type_prop, proposal_v4_invalid_bbox_deltas_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(3)); - auto image_shape = make_shared(element::f32, Shape{5}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, PartialShape::dynamic(3)); + auto image_shape = make_shared(element::f32, Shape{5}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -622,10 +620,10 @@ TEST(type_prop, proposal_v4_invalid_bbox_deltas_dynamic) { } TEST(type_prop, proposal_v4_invalid_image_shape_dynamic) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, PartialShape::dynamic(0)); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, PartialShape::dynamic(0)); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -633,10 +631,10 @@ TEST(type_prop, proposal_v4_invalid_image_shape_dynamic) { } TEST(type_prop, proposal_v4_invalid_class_probs_type) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::i32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + 
op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::i32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -644,10 +642,10 @@ TEST(type_prop, proposal_v4_invalid_class_probs_type) { } TEST(type_prop, proposal_v4_invalid_bbox_deltas_type) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::i32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::i32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::f32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, @@ -655,10 +653,10 @@ TEST(type_prop, proposal_v4_invalid_bbox_deltas_type) { } TEST(type_prop, proposal_v4_invalid_image_shape_type) { - op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); - auto image_shape = make_shared(element::i32, Shape{3}); + op::v0::Proposal::Attributes attrs; + auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::f32, Shape{1, 4, 3, 4}); + auto image_shape = make_shared(element::i32, Shape{3}); OV_EXPECT_THROW(std::ignore = make_shared(class_probs, class_bbox_deltas, image_shape, attrs), NodeValidationFailure, diff --git a/src/core/tests/type_prop/psroi_pooling.cpp b/src/core/tests/type_prop/psroi_pooling.cpp index 483516f924edf7..0bb62e1739db5e 100644 --- a/src/core/tests/type_prop/psroi_pooling.cpp +++ b/src/core/tests/type_prop/psroi_pooling.cpp @@ -4,7 +4,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" #include "openvino/opsets/opset11.hpp" using namespace ov; diff --git a/src/core/tests/type_prop/random_uniform.cpp b/src/core/tests/type_prop/random_uniform.cpp index 51e37b58f19136..769172b0a58702 100644 --- a/src/core/tests/type_prop/random_uniform.cpp +++ b/src/core/tests/type_prop/random_uniform.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/opsets/opset8.hpp" diff --git a/src/core/tests/type_prop/range.cpp b/src/core/tests/type_prop/range.cpp index 98b43c29649305..30dc57bac7bb1b 100644 --- a/src/core/tests/type_prop/range.cpp +++ b/src/core/tests/type_prop/range.cpp @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/range.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" using namespace std; -using namespace ngraph; +using namespace ov; struct RangeParams { double start; @@ -19,56 +20,56 @@ struct RangeParams { // ------------------------------ V0 ------------------------------ TEST(type_prop, range_nonconst_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::i32, 
Shape{}); + auto stop = make_shared(element::i32, Shape{}); + auto step = make_shared(element::i32, Shape{}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), element::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_some_dyn_et_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::i32, Shape{}); + auto stop = make_shared(element::dynamic, Shape{}); + auto step = make_shared(element::i32, Shape{}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), element::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_all_dyn_et_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::dynamic, Shape{}); + auto stop = make_shared(element::dynamic, Shape{}); + auto step = make_shared(element::dynamic, Shape{}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), element::dynamic); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_f32_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::dynamic, Shape{}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::dynamic, Shape{}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), element::f32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_boolean_fails) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::boolean, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::dynamic, Shape{}); + auto stop = make_shared(element::boolean, Shape{}); + auto step = make_shared(element::dynamic, Shape{}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "Boolean element type not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Element type for start, stop, and step, must not be boolean."); @@ -78,23 +79,23 @@ TEST(type_prop, range_nonconst_boolean_fails) { } TEST(type_prop, range_some_const_ok) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{2}); + auto start = make_shared(element::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::i32, Shape{}); + auto step = make_shared(element::i32, Shape{}, std::vector{2}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), element::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, 
range_some_const_zero_stride_fails) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::i32, Shape{}); + auto step = make_shared(element::i32, Shape{}, std::vector{0}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "Zero stride not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be zero"); @@ -104,13 +105,14 @@ TEST(type_prop, range_some_const_zero_stride_fails) { } TEST(type_prop, range_some_const_plus_inf_start_fails) { - auto start = - make_shared(element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, + Shape{}, + std::vector{std::numeric_limits::infinity()}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "+Infinity start not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite."); @@ -122,13 +124,14 @@ TEST(type_prop, range_some_const_plus_inf_start_fails) { } TEST(type_prop, range_some_const_minus_inf_start_fails) { - auto start = - make_shared(element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "-Infinity start not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite."); @@ -142,12 +145,12 @@ TEST(type_prop, range_some_const_minus_inf_start_fails) { } TEST(type_prop, range_some_const_nan_start_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "NaN start not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite."); @@ -161,13 +164,14 @@ TEST(type_prop, range_some_const_nan_start_fails) { } TEST(type_prop, range_some_const_plus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); - auto stop = - make_shared(element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, + Shape{}, + 
std::vector{std::numeric_limits::infinity()}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "+Infinity stop not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite."); @@ -179,13 +183,14 @@ TEST(type_prop, range_some_const_plus_inf_stop_fails) { } TEST(type_prop, range_some_const_minus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); - auto stop = - make_shared(element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "-Infinity stop not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite."); @@ -199,12 +204,12 @@ TEST(type_prop, range_some_const_minus_inf_stop_fails) { } TEST(type_prop, range_some_const_nan_stio_fails) { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "NaN stop not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite."); @@ -218,13 +223,14 @@ TEST(type_prop, range_some_const_nan_stio_fails) { } TEST(type_prop, range_some_const_plus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = - make_shared(element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + auto start = make_shared(element::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, + Shape{}, + std::vector{std::numeric_limits::infinity()}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "+Infinity stride not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be zero, nan, or infinite."); @@ -236,13 +242,14 @@ TEST(type_prop, range_some_const_plus_inf_stride_fails) { } TEST(type_prop, range_some_const_minus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = - make_shared(element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + auto start = make_shared(element::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "-Infinity stride not detected"; } catch (const 
NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be zero, nan, or infinite."); @@ -256,12 +263,12 @@ TEST(type_prop, range_some_const_minus_inf_stride_fails) { } TEST(type_prop, range_some_const_nan_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto start = make_shared(element::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "NaN stride not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be zero, nan, or infinite."); @@ -275,12 +282,12 @@ TEST(type_prop, range_some_const_nan_stride_fails) { } TEST(type_prop, range_all_const_zero_stride_fails) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}, std::vector{5}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::i32, Shape{}, std::vector{5}); + auto step = make_shared(element::i32, Shape{}, std::vector{0}); try { - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); FAIL() << "Zero stride not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be zero"); @@ -291,11 +298,11 @@ TEST(type_prop, range_all_const_zero_stride_fails) { template void run_range_test(const element::Type& et, const RangeParams& params) { - auto start = make_shared(et, Shape{}, std::vector{static_cast(params.start)}); - auto stop = make_shared(et, Shape{}, std::vector{static_cast(params.stop)}); - auto step = make_shared(et, Shape{}, std::vector{static_cast(params.step)}); + auto start = make_shared(et, Shape{}, std::vector{static_cast(params.start)}); + auto stop = make_shared(et, Shape{}, std::vector{static_cast(params.stop)}); + auto step = make_shared(et, Shape{}, std::vector{static_cast(params.step)}); - auto range = make_shared(start, stop, step); + auto range = make_shared(start, stop, step); EXPECT_EQ(range->get_element_type(), et); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(params.expected_shape)) @@ -438,9 +445,9 @@ TEST(type_prop, range_v4_all_const_shape_inference) { int start_val = 0; int stop_val = num_elems * step_val + start_val; element::Type_t et = element::i32; - auto start = make_shared(et, Shape{}, std::vector{start_val}); - auto stop = make_shared(et, Shape{}, std::vector{stop_val}); - auto step = make_shared(et, Shape{}, std::vector{step_val}); + auto start = make_shared(et, Shape{}, std::vector{start_val}); + auto stop = make_shared(et, Shape{}, std::vector{stop_val}); + auto step = make_shared(et, Shape{}, std::vector{step_val}); auto range = make_shared(start, stop, step, et); auto pshape_out = range->get_output_partial_shape(0); ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1}); @@ -451,9 +458,9 @@ TEST(type_prop, range_v4_some_const_shape_inference) { int step_val = 5; int start_val = 0; element::Type_t et = element::i32; - auto start = make_shared(et, Shape{}, std::vector{start_val}); - auto stop = make_shared(et, Shape{}); - auto step = 
make_shared(et, Shape{}, std::vector{step_val}); + auto start = make_shared(et, Shape{}, std::vector{start_val}); + auto stop = make_shared(et, Shape{}); + auto step = make_shared(et, Shape{}, std::vector{step_val}); auto range = make_shared(start, stop, step, et); auto pshape_out = range->get_output_partial_shape(0); ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1}); @@ -462,9 +469,9 @@ TEST(type_prop, range_v4_some_const_shape_inference) { TEST(type_prop, range_v4_trunc_inputs_shape_inference) { element::Type_t et = element::f32; - auto start = make_shared(et, Shape{}, std::vector{0.9f}); - auto stop = make_shared(et, Shape{}, std::vector{10.3f}); - auto step = make_shared(et, Shape{}, std::vector{1.7f}); + auto start = make_shared(et, Shape{}, std::vector{0.9f}); + auto stop = make_shared(et, Shape{}, std::vector{10.3f}); + auto step = make_shared(et, Shape{}, std::vector{1.7f}); auto range = make_shared(start, stop, step, element::i32); auto pshape_out = range->get_output_partial_shape(0); ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1}); @@ -474,12 +481,12 @@ TEST(type_prop, range_v4_trunc_inputs_shape_inference) { TEST(type_prop, range_v4_invalid_inputs_elem_type) { // invalid element type for start scalar try { - auto start = make_shared(element::boolean, Shape{}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::boolean, Shape{}); + auto stop = make_shared(element::i32, Shape{}); + auto step = make_shared(element::i32, Shape{}); auto range = make_shared(start, stop, step, element::i32); FAIL() << "Exception expected"; - } catch (ngraph::NodeValidationFailure& error) { + } catch (ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'start' input scalar should be a numeric type")); } catch (...) { FAIL() << "Unknown exception was thrown"; @@ -487,12 +494,12 @@ TEST(type_prop, range_v4_invalid_inputs_elem_type) { // invalid element type for stop scalar try { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::boolean, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::dynamic, Shape{}); + auto stop = make_shared(element::boolean, Shape{}); + auto step = make_shared(element::i32, Shape{}); auto range = make_shared(start, stop, step, element::i32); FAIL() << "Exception expected"; - } catch (ngraph::NodeValidationFailure& error) { + } catch (ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'stop' input scalar should be a numeric type")); } catch (...) { FAIL() << "Unknown exception was thrown"; @@ -500,12 +507,12 @@ TEST(type_prop, range_v4_invalid_inputs_elem_type) { // invalid element type for step scalar try { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::undefined, Shape{}); - auto step = make_shared(element::boolean, Shape{}); + auto start = make_shared(element::i32, Shape{}); + auto stop = make_shared(element::undefined, Shape{}); + auto step = make_shared(element::boolean, Shape{}); auto range = make_shared(start, stop, step, element::i32); FAIL() << "Exception expected"; - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'step' input scalar should be a numeric type")); } catch (...) 
{ FAIL() << "Unknown exception was thrown"; @@ -514,11 +521,11 @@ TEST(type_prop, range_v4_invalid_inputs_elem_type) { TEST(type_prop, range_v4_invalid_output_elem_type) { try { - auto start = make_shared(element::f16, Shape{1}); - auto stop = make_shared(element::f16, Shape{}); - auto step = make_shared(element::f16, Shape{}); + auto start = make_shared(element::f16, Shape{1}); + auto stop = make_shared(element::f16, Shape{}); + auto step = make_shared(element::f16, Shape{}); auto range = make_shared(start, stop, step, element::boolean); - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("output tensor type should be a numeric type")); } catch (...) { FAIL() << "Unknown exception was thrown"; @@ -528,12 +535,12 @@ TEST(type_prop, range_v4_invalid_output_elem_type) { TEST(type_prop, range_v4_invalid_inputs_non_scalar) { // start input not a scalar try { - auto start = make_shared(element::f32, Shape{1}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}); + auto start = make_shared(element::f32, Shape{1}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "Exception expected"; - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'start' input is not a scalar")); } catch (...) { FAIL() << "Unknown exception was thrown"; @@ -541,12 +548,12 @@ TEST(type_prop, range_v4_invalid_inputs_non_scalar) { // stop input not a scalar try { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto step = make_shared(element::f32, Shape{}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto step = make_shared(element::f32, Shape{}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "Exception expected"; - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'stop' input is not a scalar")); } catch (...) { FAIL() << "Unknown exception was thrown"; @@ -554,12 +561,12 @@ TEST(type_prop, range_v4_invalid_inputs_non_scalar) { // step input not a scalar try { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, PartialShape::dynamic(2)); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, PartialShape::dynamic(2)); auto range = make_shared(start, stop, step, element::f32); FAIL() << "Exception expected"; - } catch (const ngraph::NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("'step' input is not a scalar")); } catch (...) 
{ FAIL() << "Unknown exception was thrown"; @@ -569,11 +576,11 @@ TEST(type_prop, range_v4_invalid_inputs_non_scalar) { TEST(type_prop, range_v4_invalid_inputs_plus_inf) { // invalid start input scalar, +inf try { - auto start = make_shared(element::f32, - Shape{}, - std::vector{std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, + Shape{}, + std::vector{std::numeric_limits::infinity()}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "+Infinity start not detected"; } catch (const NodeValidationFailure& error) { @@ -586,11 +593,11 @@ TEST(type_prop, range_v4_invalid_inputs_plus_inf) { // invalid stop input scalar, +inf try { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, - Shape{}, - std::vector{std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, + Shape{}, + std::vector{std::numeric_limits::infinity()}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "+Infinity stop not detected"; } catch (const NodeValidationFailure& error) { @@ -603,11 +610,11 @@ TEST(type_prop, range_v4_invalid_inputs_plus_inf) { // invalid step input scalar, +inf try { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, - Shape{}, - std::vector{std::numeric_limits::infinity()}); + auto start = make_shared(element::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, + Shape{}, + std::vector{std::numeric_limits::infinity()}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "+Infinity step not detected"; } catch (const NodeValidationFailure& error) { @@ -622,11 +629,11 @@ TEST(type_prop, range_v4_invalid_inputs_plus_inf) { TEST(type_prop, range_v4_invalid_inputs_minus_inf) { // invalid start input scalar, -inf try { - auto start = make_shared(element::f32, - Shape{}, - std::vector{-std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "-Infinity start not detected"; } catch (const NodeValidationFailure& error) { @@ -641,11 +648,11 @@ TEST(type_prop, range_v4_invalid_inputs_minus_inf) { // invalid stop input scalar, -inf try { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, - Shape{}, - std::vector{-std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); 
FAIL() << "-Infinity stop not detected"; } catch (const NodeValidationFailure& error) { @@ -660,11 +667,11 @@ TEST(type_prop, range_v4_invalid_inputs_minus_inf) { // invalid step input scalar, -inf try { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, - Shape{}, - std::vector{-std::numeric_limits::infinity()}); + auto start = make_shared(element::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, + Shape{}, + std::vector{-std::numeric_limits::infinity()}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "-Infinity step not detected"; } catch (const NodeValidationFailure& error) { @@ -681,9 +688,9 @@ TEST(type_prop, range_v4_invalid_inputs_minus_inf) { TEST(type_prop, range_v4_invalid_inputs_nan) { // invalid start input scalar, nan try { - auto start = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "NaN start not detected"; } catch (const NodeValidationFailure& error) { @@ -698,9 +705,9 @@ TEST(type_prop, range_v4_invalid_inputs_nan) { // invalid stop input scalar, nan try { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}); + auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "NaN stop not detected"; } catch (const NodeValidationFailure& error) { @@ -715,9 +722,9 @@ TEST(type_prop, range_v4_invalid_inputs_nan) { // invalid step input scalar, nan try { - auto start = make_shared(element::f32, Shape{}, std::vector{1}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto start = make_shared(element::f32, Shape{}, std::vector{1}); + auto stop = make_shared(element::f32, Shape{}); + auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); auto range = make_shared(start, stop, step, element::f32); FAIL() << "NaN step not detected"; } catch (const NodeValidationFailure& error) { @@ -732,18 +739,18 @@ TEST(type_prop, range_v4_invalid_inputs_nan) { } TEST(type_prop, range_v4_zero_output_elem_pos_step) { - auto start = make_shared(element::f32, Shape{}, std::vector{5}); - auto stop = make_shared(element::f32, Shape{}, std::vector{1}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::f32, Shape{}, std::vector{5}); + auto stop = make_shared(element::f32, Shape{}, std::vector{1}); + auto step = make_shared(element::f32, Shape{}, std::vector{1}); auto range = make_shared(start, stop, step, element::f32); // if step is positive and start >= stop, number of output elements is zero ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(0)})); } TEST(type_prop, 
range_v4_zero_output_elem_neg_step) { - auto start = make_shared(element::f32, Shape{}, std::vector{1}); - auto stop = make_shared(element::f32, Shape{}, std::vector{5}); - auto step = make_shared(element::f32, Shape{}, std::vector{-1}); + auto start = make_shared(element::f32, Shape{}, std::vector{1}); + auto stop = make_shared(element::f32, Shape{}, std::vector{5}); + auto step = make_shared(element::f32, Shape{}, std::vector{-1}); auto range = make_shared(start, stop, step, element::f32); // if step is negative and start <= stop, number of output elements is zero ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(0)})); @@ -751,9 +758,9 @@ TEST(type_prop, range_v4_zero_output_elem_neg_step) { template void run_range_v4_test(const element::Type& et, const RangeParams& params) { - auto start = make_shared(et, Shape{}, std::vector{static_cast(params.start)}); - auto stop = make_shared(et, Shape{}, std::vector{static_cast(params.stop)}); - auto step = make_shared(et, Shape{}, std::vector{static_cast(params.step)}); + auto start = make_shared(et, Shape{}, std::vector{static_cast(params.start)}); + auto stop = make_shared(et, Shape{}, std::vector{static_cast(params.stop)}); + auto step = make_shared(et, Shape{}, std::vector{static_cast(params.step)}); auto range = make_shared(start, stop, step, et); diff --git a/src/core/tests/type_prop/rdft.cpp b/src/core/tests/type_prop/rdft.cpp index e6dd54b63200f3..7b87b565b76352 100644 --- a/src/core/tests/type_prop/rdft.cpp +++ b/src/core/tests/type_prop/rdft.cpp @@ -14,11 +14,12 @@ // limitations under the License. //***************************************************************************** +#include "openvino/op/rdft.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/constant.hpp" -using namespace ngraph; +using namespace ov; struct RDFTConstantAxesAndConstantSignalSizeTestParams { PartialShape input_shape; @@ -35,15 +36,15 @@ struct RDFTConstantAxesAndConstantSignalSizeTest TEST_P(RDFTConstantAxesAndConstantSignalSizeTest, rdft_constant_axes_and_signal_size) { auto params = GetParam(); - auto data = std::make_shared(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); std::shared_ptr rdft; if (params.signal_size.empty()) { - rdft = std::make_shared(data, axes_input); + rdft = std::make_shared(data, axes_input); } else { auto signal_size_input = - op::Constant::create(element::i64, params.signal_size_shape, params.signal_size); + ov::op::v0::Constant::create(element::i64, params.signal_size_shape, params.signal_size); rdft = std::make_shared(data, axes_input, signal_size_input); } @@ -144,8 +145,8 @@ TEST(type_prop, rdft_dynamic_axes) { const auto axes_shape = PartialShape::dynamic(); const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 2}; - auto data = std::make_shared(element::f32, input_shape); - auto axes_input = std::make_shared(element::i64, axes_shape); + auto data = std::make_shared(element::f32, input_shape); + auto axes_input = std::make_shared(element::i64, axes_shape); auto rdft = std::make_shared(data, axes_input); EXPECT_EQ(rdft->get_element_type(), element::f32); @@ -163,8 +164,8 @@ struct RDFTNonConstantAxesTest : 
::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = std::make_shared(element::i64, params.axes_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = std::make_shared(element::i64, params.axes_shape); auto rdft = std::make_shared(data, axes_input); EXPECT_EQ(rdft->get_element_type(), element::f32); @@ -214,9 +215,9 @@ struct RDFTNonConstantSignalSizeTest : ::testing::TestWithParam(element::f32, params.input_shape); - auto axes_input = op::Constant::create(element::i64, params.axes_shape, params.axes); - auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); + auto data = std::make_shared(element::f32, params.input_shape); + auto axes_input = ov::op::v0::Constant::create(element::i64, params.axes_shape, params.axes); + auto signal_size_input = std::make_shared(element::i64, params.signal_size_shape); auto rdft = std::make_shared(data, axes_input, signal_size_input); EXPECT_EQ(rdft->get_element_type(), element::f32); @@ -245,10 +246,10 @@ INSTANTIATE_TEST_SUITE_P( PrintToDummyParamName()); TEST(type_prop, rdft_invalid_input) { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); try { - auto data = std::make_shared(element::f32, Shape{}); + auto data = std::make_shared(element::f32, Shape{}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -256,7 +257,7 @@ TEST(type_prop, rdft_invalid_input) { } try { - auto data = std::make_shared(element::f32, Shape{4}); + auto data = std::make_shared(element::f32, Shape{4}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid input."; } catch (const NodeValidationFailure& error) { @@ -266,10 +267,10 @@ TEST(type_prop, rdft_invalid_input) { } TEST(type_prop, rdft_invalid_axes) { - auto data = std::make_shared(element::f32, Shape{4, 3, 2}); + auto data = std::make_shared(element::f32, Shape{4, 3, 2}); try { - auto axes = op::Constant::create(element::i64, Shape{1}, {3}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {3}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -277,7 +278,7 @@ TEST(type_prop, rdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1}, {-4}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {-4}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -285,7 +286,7 @@ TEST(type_prop, rdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{2}, {0, -3}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, -3}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -293,7 +294,7 @@ TEST(type_prop, rdft_invalid_axes) { } try { - auto axes = op::Constant::create(element::i64, Shape{1, 2}, {0, 1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1}); auto rdft = std::make_shared(data, axes); FAIL() << "RDFT node was created with invalid axes."; } catch (const NodeValidationFailure& error) { @@ -302,11 +303,11 @@ TEST(type_prop, rdft_invalid_axes) { } TEST(type_prop, rdft_invalid_signal_size) { - auto data = 
std::make_shared(element::f32, Shape{4, 3, 2});
-    auto axes = op::Constant::create(element::i64, Shape{1}, {0});
+    auto data = std::make_shared(element::f32, Shape{4, 3, 2});
+    auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {0});

     try {
-        auto signal_size = op::Constant::create(element::i64, Shape{1, 2}, {0, 1});
+        auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{1, 2}, {0, 1});
         auto rdft = std::make_shared(data, axes, signal_size);
         FAIL() << "RDFT node was created with invalid signal size.";
     } catch (const NodeValidationFailure& error) {
@@ -314,7 +315,7 @@ TEST(type_prop, rdft_invalid_signal_size) {
     }

     try {
-        auto signal_size = op::Constant::create(element::i64, Shape{2}, {0, 1});
+        auto signal_size = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1});
         auto rdft = std::make_shared(data, axes, signal_size);
         FAIL() << "RDFT node was created with invalid signal size.";
     } catch (const NodeValidationFailure& error) {
@@ -328,9 +329,9 @@ TEST(type_prop, rdft_dynamic_types) {
     const auto signal_size_shape = PartialShape::dynamic();
     const auto ref_output_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 2};

-    auto data = std::make_shared(element::dynamic, input_shape);
-    auto axes_input = std::make_shared(element::dynamic, axes_shape);
-    auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape);
+    auto data = std::make_shared(element::dynamic, input_shape);
+    auto axes_input = std::make_shared(element::dynamic, axes_shape);
+    auto signal_size_input = std::make_shared(element::dynamic, signal_size_shape);
     auto rdft = std::make_shared(data, axes_input, signal_size_input);

     EXPECT_EQ(rdft->get_element_type(), element::dynamic);
diff --git a/src/core/tests/type_prop/read_value.cpp b/src/core/tests/type_prop/read_value.cpp
index 7992038a1afd9b..8f8765eddcc799 100644
--- a/src/core/tests/type_prop/read_value.cpp
+++ b/src/core/tests/type_prop/read_value.cpp
@@ -2,17 +2,16 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/read_value.hpp"
+
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/opsets/opset5.hpp"

 using namespace std;
-using namespace ngraph;
+using namespace ov;

 TEST(type_prop, read_value_deduce) {
-    auto input = make_shared(element::f32, Shape{1, 2, 64, 64});
-    auto read_value = make_shared(input, "variable_id");
+    auto input = make_shared(element::f32, Shape{1, 2, 64, 64});
+    auto read_value = make_shared(input, "variable_id");

     ASSERT_EQ(read_value->get_element_type(), element::f32);
     ASSERT_EQ(read_value->get_shape(), (Shape{1, 2, 64, 64}));
diff --git a/src/core/tests/type_prop/reduce_l1.cpp b/src/core/tests/type_prop/reduce_l1.cpp
index fbe7df48b7df66..624c91275cc0ae 100644
--- a/src/core/tests/type_prop/reduce_l1.cpp
+++ b/src/core/tests/type_prop/reduce_l1.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_l1.hpp"
+
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_l1, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_l1_et, ReduceArithmeticTest, Type);
diff --git a/src/core/tests/type_prop/reduce_l2.cpp b/src/core/tests/type_prop/reduce_l2.cpp
index ecd8db9a41a5f3..d4bba5a8b239e1 100644
--- a/src/core/tests/type_prop/reduce_l2.cpp
+++ b/src/core/tests/type_prop/reduce_l2.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_l2.hpp"
+
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_l2, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_l2_et, ReduceArithmeticTest, Type);
diff --git a/src/core/tests/type_prop/reduce_logical_and.cpp b/src/core/tests/type_prop/reduce_logical_and.cpp
index 734b6574e9062f..b4d74666e009e5 100644
--- a/src/core/tests/type_prop/reduce_logical_and.cpp
+++ b/src/core/tests/type_prop/reduce_logical_and.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_logical_and.hpp"
+
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_logical_and, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_logical_and_et, ReduceLogicalTest, Type);
diff --git a/src/core/tests/type_prop/reduce_logical_or.cpp b/src/core/tests/type_prop/reduce_logical_or.cpp
index 75c250d80c6a1c..61f83c9e2545eb 100644
--- a/src/core/tests/type_prop/reduce_logical_or.cpp
+++ b/src/core/tests/type_prop/reduce_logical_or.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_logical_or.hpp"
+
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_logical_or, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_logical_or_et, ReduceLogicalTest, Type);
diff --git a/src/core/tests/type_prop/reduce_max.cpp b/src/core/tests/type_prop/reduce_max.cpp
index 3bde75d3f452c8..265742106f3396 100644
--- a/src/core/tests/type_prop/reduce_max.cpp
+++ b/src/core/tests/type_prop/reduce_max.cpp
@@ -2,17 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_max.hpp"
+
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_max, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_max_et, ReduceArithmeticTest, Type);

 TEST(type_prop, reduce_max_value_propagation) {
-    const auto param = std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6});
+    const auto param = std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6});
     const auto shape_of = std::make_shared(param);
     const auto reduce_prod =
-        std::make_shared(shape_of, op::Constant::create(element::i64, {1}, {0}), true);
+        std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true);
     const auto reshape = std::make_shared(param, reduce_prod, false);

     EXPECT_EQ(reshape->get_element_type(), ov::element::f32);
diff --git a/src/core/tests/type_prop/reduce_mean.cpp b/src/core/tests/type_prop/reduce_mean.cpp
index f4ecd3613a76e9..1c66a5d589118f 100644
--- a/src/core/tests/type_prop/reduce_mean.cpp
+++ b/src/core/tests/type_prop/reduce_mean.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_mean.hpp"
+
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_mean, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_mean_et, ReduceArithmeticTest, Type);
diff --git a/src/core/tests/type_prop/reduce_min.cpp b/src/core/tests/type_prop/reduce_min.cpp
index eecd4a27b118fc..0c91f3c27db92c 100644
--- a/src/core/tests/type_prop/reduce_min.cpp
+++ b/src/core/tests/type_prop/reduce_min.cpp
@@ -2,17 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_min.hpp"
+
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_min, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_min_et, ReduceArithmeticTest, Type);

 TEST(type_prop, reduce_min_value_propagation) {
-    const auto param = std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6});
+    const auto param = std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6});
     const auto shape_of = std::make_shared(param);
     const auto reduce_prod =
-        std::make_shared(shape_of, op::Constant::create(element::i64, {1}, {0}), true);
+        std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true);
     const auto reshape = std::make_shared(param, reduce_prod, false);

     EXPECT_EQ(reshape->get_element_type(), ov::element::f32);
diff --git a/src/core/tests/type_prop/reduce_ops.hpp b/src/core/tests/type_prop/reduce_ops.hpp
index e9cc04ae6d98a3..49a5c35961f6c9 100644
--- a/src/core/tests/type_prop/reduce_ops.hpp
+++ b/src/core/tests/type_prop/reduce_ops.hpp
@@ -2,13 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gmock/gmock.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/parameter.hpp"

 using namespace std;
-using namespace ngraph;
+using namespace ov;

 struct ReduceParams {
     PartialShape data_ps;
@@ -21,15 +23,15 @@ struct ReduceParams {
 template
 static std::shared_ptr makeReduceOp(const ReduceParams& p, bool axes_as_param = false) {
-    auto in_data = make_shared(p.data_et, p.data_ps);
     shared_ptr in_axes;
     if (axes_as_param) {
-        in_axes = make_shared(p.axes_et, p.axes_ps);
+        in_axes = make_shared(p.axes_et, p.axes_ps);
     } else {
         if (shape_size(p.axes_ps) != p.axes.size()) {
             OPENVINO_THROW("Axes shape does not match with axes elements");
         }
-        in_axes = make_shared(p.axes_et, p.axes_ps, p.axes);
+        in_axes = make_shared(p.axes_et, p.axes_ps, p.axes);
     }
     return make_shared(in_data, in_axes, p.keep_dims);
 }
@@ -49,8 +51,8 @@ TYPED_TEST_P(ReduceTest, reduce_default_ctor) {
     bool keep_dims = true;

-    const auto data = make_shared(data_et, data_ps);
-    const auto in_axes = make_shared(axes_et, axes_ps);
+    const auto data = make_shared(data_et, data_ps);
+    const auto in_axes = make_shared(axes_et, axes_ps);

     auto op = std::make_shared();
     op->set_arguments(OutputVector{data, in_axes});
diff --git a/src/core/tests/type_prop/reduce_prod.cpp b/src/core/tests/type_prop/reduce_prod.cpp
index f66e8c44d31a8c..20cc6699a6b61b 100644
--- a/src/core/tests/type_prop/reduce_prod.cpp
+++ b/src/core/tests/type_prop/reduce_prod.cpp
@@ -2,17 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/reduce_prod.hpp"
+
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "reduce_ops.hpp"

-using Type = ::testing::Types;
+using Type = ::testing::Types;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod, ReduceTest, Type);
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod_et, ReduceArithmeticTest, Type);

 TEST(type_prop, reduce_prod_value_propagation) {
-    const auto param =
std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6}); const auto shape_of = std::make_shared(param); const auto reduce_prod = - std::make_shared(shape_of, op::Constant::create(element::i64, {1}, {0}), true); + std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true); const auto reshape = std::make_shared(param, reduce_prod, false); EXPECT_EQ(reshape->get_element_type(), ov::element::f32); diff --git a/src/core/tests/type_prop/reduce_sum.cpp b/src/core/tests/type_prop/reduce_sum.cpp index 356cff2bc3f48d..83b3eae7bb79d2 100644 --- a/src/core/tests/type_prop/reduce_sum.cpp +++ b/src/core/tests/type_prop/reduce_sum.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/reduce_sum.hpp" + #include "reduce_ops.hpp" -using Type = ::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_sum, ReduceTest, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_sum_et, ReduceArithmeticTest, Type); diff --git a/src/core/tests/type_prop/relu.cpp b/src/core/tests/type_prop/relu.cpp index bbce6b116c98dd..702e0412006629 100644 --- a/src/core/tests/type_prop/relu.cpp +++ b/src/core/tests/type_prop/relu.cpp @@ -2,25 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/relu.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, relu_2d) { - auto param = make_shared(element::f32, Shape{2, 4}); + auto param = make_shared(element::f32, Shape{2, 4}); Shape relu_shape{2, 4}; - auto relu = make_shared(param); + auto relu = make_shared(param); ASSERT_EQ(relu->get_element_type(), element::f32); ASSERT_EQ(relu->get_shape(), relu_shape); } TEST(type_prop, relu_4d) { - auto param = make_shared(element::f32, Shape{2, 2, 2, 2}); + auto param = make_shared(element::f32, Shape{2, 2, 2, 2}); Shape relu_shape{2, 2, 2, 2}; - auto relu = make_shared(param); + auto relu = make_shared(param); ASSERT_EQ(relu->get_element_type(), element::f32); ASSERT_EQ(relu->get_shape(), relu_shape); } diff --git a/src/core/tests/type_prop/reshape.cpp b/src/core/tests/type_prop/reshape.cpp index 343cf38b6ee3db..77f475b5e2026a 100644 --- a/src/core/tests/type_prop/reshape.cpp +++ b/src/core/tests/type_prop/reshape.cpp @@ -2,16 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/reshape.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, static_value_propagation) { - auto param = make_shared(element::f32, Shape{1, 2, 3}); + auto param = make_shared(element::f32, Shape{1, 2, 3}); auto shape_of = make_shared(param); auto r = make_shared(param, shape_of, false); @@ -21,7 +32,7 @@ TEST(type_prop, static_value_propagation) { } TEST(type_prop, interval_value_propagation) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); + auto param = make_shared(element::f32, 
PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); auto r = make_shared(param, shape_of, false); @@ -38,11 +49,11 @@ TEST(type_prop, interval_value_propagation) { } TEST(type_prop, static_value_propagation_through_gather) { - auto param = make_shared(element::f32, Shape{1, 2, 3}); + auto param = make_shared(element::f32, Shape{1, 2, 3}); auto shape_of = make_shared(param); auto gather = make_shared(shape_of, - op::Constant::create(element::i64, {3}, {2, 1, 0}), - op::Constant::create(element::i64, {}, {0})); + ov::op::v0::Constant::create(element::i64, {3}, {2, 1, 0}), + ov::op::v0::Constant::create(element::i64, {}, {0})); auto r = make_shared(param, gather, false); @@ -51,11 +62,11 @@ TEST(type_prop, static_value_propagation_through_gather) { } TEST(type_prop, interval_value_propagation_through_gather) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); + auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); auto gather = make_shared(shape_of, - op::Constant::create(element::i64, {3}, {2, 1, 0}), - op::Constant::create(element::i64, {}, {0})); + ov::op::v0::Constant::create(element::i64, {3}, {2, 1, 0}), + ov::op::v0::Constant::create(element::i64, {}, {0})); auto r = make_shared(param, gather, false); @@ -64,15 +75,15 @@ TEST(type_prop, interval_value_propagation_through_gather) { } TEST(type_prop, interval_value_propagation_through_consecutive_gathers) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); + auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); auto gather_1 = make_shared(shape_of, - op::Constant::create(element::i64, {3}, {2, 1, 0}), - op::Constant::create(element::i64, {}, {0})); + ov::op::v0::Constant::create(element::i64, {3}, {2, 1, 0}), + ov::op::v0::Constant::create(element::i64, {}, {0})); auto gather_2 = make_shared(gather_1, - op::Constant::create(element::i64, {3}, {1, 2, 0}), - op::Constant::create(element::i64, {}, {0})); + ov::op::v0::Constant::create(element::i64, {3}, {1, 2, 0}), + ov::op::v0::Constant::create(element::i64, {}, {0})); auto r = make_shared(param, gather_2, false); @@ -81,26 +92,27 @@ TEST(type_prop, interval_value_propagation_through_consecutive_gathers) { } TEST(type_prop, interval_value_propagation_concatenated_gathers) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); + auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); auto gather_1 = make_shared(shape_of, - op::Constant::create(element::i64, {}, {2}), - op::Constant::create(element::i64, {}, {0})); - auto dim_1 = make_shared(gather_1, op::Constant::create(element::i64, {1}, {0})); + ov::op::v0::Constant::create(element::i64, {}, {2}), + ov::op::v0::Constant::create(element::i64, {}, {0})); + auto dim_1 = make_shared(gather_1, ov::op::v0::Constant::create(element::i64, {1}, {0})); auto gather_2 = make_shared(shape_of, - op::Constant::create(element::i64, {}, {1}), - op::Constant::create(element::i64, {}, {0})); - auto tmp_dim_2 = make_shared(gather_2, op::Constant::create(element::i64, {2}, {1, 1}), true); - auto dim_2 = make_shared(tmp_dim_2, op::Constant::create(element::i64, {1}, {0})); + ov::op::v0::Constant::create(element::i64, {}, {1}), + ov::op::v0::Constant::create(element::i64, {}, {0})); + auto tmp_dim_2 = + make_shared(gather_2, ov::op::v0::Constant::create(element::i64, {2}, {1, 1}), 
true); + auto dim_2 = make_shared(tmp_dim_2, ov::op::v0::Constant::create(element::i64, {1}, {0})); auto gather_3 = make_shared(shape_of, - op::Constant::create(element::i64, {}, {0}), - op::Constant::create(element::i64, {}, {0})); - auto dim_3 = make_shared(gather_3, op::Constant::create(element::i64, {1}, {0})); + ov::op::v0::Constant::create(element::i64, {}, {0}), + ov::op::v0::Constant::create(element::i64, {}, {0})); + auto dim_3 = make_shared(gather_3, ov::op::v0::Constant::create(element::i64, {1}, {0})); - auto shape = make_shared(OutputVector{dim_1, dim_2, dim_3}, 0); + auto shape = make_shared(OutputVector{dim_1, dim_2, dim_3}, 0); auto r = make_shared(param, shape, false); ASSERT_EQ(r->get_element_type(), element::f32); @@ -108,13 +120,13 @@ TEST(type_prop, interval_value_propagation_concatenated_gathers) { } TEST(type_prop, interval_value_propagation_mul_div) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 2}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 2}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(cast_fp, op::Constant::create(element::f32, {3}, {-2, 2, -4})); - auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {-2, 2, -4})); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(cast_fp, ov::op::v0::Constant::create(element::f32, {3}, {-2, 2, -4})); + auto div = make_shared(mul, ov::op::v0::Constant::create(element::f32, {3}, {-2, 2, -4})); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -123,15 +135,15 @@ TEST(type_prop, interval_value_propagation_mul_div) { } TEST(type_prop, interval_value_propagation_mul_div_rhs_shape) { - auto param = - make_shared(element::f32, PartialShape{Dimension(1, 5), Dimension(0, 4), Dimension(2, 3)}); + auto param = make_shared(element::f32, + PartialShape{Dimension(1, 5), Dimension(0, 4), Dimension(2, 3)}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(op::Constant::create(element::f32, {}, {2}), cast_fp); - auto div = make_shared(op::Constant::create(element::f32, {3}, {10, 16, 12}), mul); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(ov::op::v0::Constant::create(element::f32, {}, {2}), cast_fp); + auto div = make_shared(ov::op::v0::Constant::create(element::f32, {3}, {10, 16, 12}), mul); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -140,12 +152,12 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_shape) { } TEST(type_prop, interval_value_propagation_mul_div_lhs_scalar) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(op::Constant::create(element::f32, {}, {2}), cast_fp); - auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {2, 1, 3})); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(ov::op::v0::Constant::create(element::f32, {}, {2}), cast_fp); + auto div = make_shared(mul, 
ov::op::v0::Constant::create(element::f32, {3}, {2, 1, 3})); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -154,12 +166,12 @@ TEST(type_prop, interval_value_propagation_mul_div_lhs_scalar) { } TEST(type_prop, interval_value_propagation_mul_div_rhs_scalar) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(cast_fp, op::Constant::create(element::f32, {}, {2})); - auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {2, 1, 3})); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(cast_fp, ov::op::v0::Constant::create(element::f32, {}, {2})); + auto div = make_shared(mul, ov::op::v0::Constant::create(element::f32, {3}, {2, 1, 3})); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -168,12 +180,12 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_scalar) { } TEST(type_prop, interval_value_propagation_mul_lhs_1D_div) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(op::Constant::create(element::f32, {1}, {2}), cast_fp); - auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {2, 1, 3})); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(ov::op::v0::Constant::create(element::f32, {1}, {2}), cast_fp); + auto div = make_shared(mul, ov::op::v0::Constant::create(element::f32, {3}, {2, 1, 3})); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -182,12 +194,12 @@ TEST(type_prop, interval_value_propagation_mul_lhs_1D_div) { } TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(cast_fp, op::Constant::create(element::f32, {1}, {2})); - auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {2, 1, 3})); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(cast_fp, ov::op::v0::Constant::create(element::f32, {1}, {2})); + auto div = make_shared(mul, ov::op::v0::Constant::create(element::f32, {3}, {2, 1, 3})); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -196,12 +208,12 @@ TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) { } TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) { - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); + auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6}); auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto mul = make_shared(cast_fp, 
op::Constant::create(element::f32, {1}, {2})); - auto div = make_shared(op::Constant::create(element::f32, {}, {192}), mul); - auto cast_int = make_shared(div, element::i32); + auto cast_fp = make_shared(shape_of, element::f32); + auto mul = make_shared(cast_fp, ov::op::v0::Constant::create(element::f32, {1}, {2})); + auto div = make_shared(ov::op::v0::Constant::create(element::f32, {}, {192}), mul); + auto cast_int = make_shared(div, element::i32); auto r = make_shared(param, cast_int, false); @@ -210,9 +222,10 @@ TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) { } TEST(type_prop, interval_value_propagation_reduce) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); + auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); - auto reduce_prod = make_shared(shape_of, op::Constant::create(element::i64, {1}, {0}), true); + auto reduce_prod = + make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true); auto r = make_shared(param, reduce_prod, false); ASSERT_EQ(r->get_element_type(), element::f32); @@ -220,16 +233,17 @@ TEST(type_prop, interval_value_propagation_reduce) { } TEST(type_prop, interval_value_propagation_reshape_zero_special_value) { - auto param = make_shared(element::f32, - PartialShape{Dimension(1, 8), Dimension(16, 64), 3, Dimension(200, 400)}); + auto param = + make_shared(element::f32, + PartialShape{Dimension(1, 8), Dimension(16, 64), 3, Dimension(200, 400)}); auto shape_of = make_shared(param); auto dim_021 = make_shared(shape_of, - op::Constant::create(element::i64, {3}, {0, 2, 1}), - op::Constant::create(element::i64, {}, {0})); - auto dim_3 = op::Constant::create(element::i64, {1}, {0}); + ov::op::v0::Constant::create(element::i64, {3}, {0, 2, 1}), + ov::op::v0::Constant::create(element::i64, {}, {0})); + auto dim_3 = ov::op::v0::Constant::create(element::i64, {1}, {0}); - auto shape = make_shared(OutputVector{dim_021, dim_3}, 0); + auto shape = make_shared(OutputVector{dim_021, dim_3}, 0); auto r = make_shared(param, shape, true); ASSERT_EQ(r->get_element_type(), element::f32); @@ -238,17 +252,18 @@ TEST(type_prop, interval_value_propagation_reshape_zero_special_value) { } TEST(type_prop, interval_value_propagation_reshape_zero_minus_one_special_values) { - auto param = make_shared(element::f32, - PartialShape{Dimension(1, 8), Dimension(16, 64), 6, Dimension(200, 400)}); + auto param = + make_shared(element::f32, + PartialShape{Dimension(1, 8), Dimension(16, 64), 6, Dimension(200, 400)}); auto shape_of = make_shared(param); auto dim_0 = make_shared(shape_of, - op::Constant::create(element::i64, {1}, {1}), - op::Constant::create(element::i64, {}, {0})); - auto dim_1 = op::Constant::create(element::i64, {1}, {0}); - auto dim_2 = op::Constant::create(element::i64, {1}, {-1}); + ov::op::v0::Constant::create(element::i64, {1}, {1}), + ov::op::v0::Constant::create(element::i64, {}, {0})); + auto dim_1 = ov::op::v0::Constant::create(element::i64, {1}, {0}); + auto dim_2 = ov::op::v0::Constant::create(element::i64, {1}, {-1}); - auto shape = make_shared(OutputVector{dim_0, dim_1, dim_2}, 0); + auto shape = make_shared(OutputVector{dim_0, dim_1, dim_2}, 0); auto r = make_shared(param, shape, true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), @@ -256,51 +271,53 @@ TEST(type_prop, interval_value_propagation_reshape_zero_minus_one_special_values } TEST(type_prop, reshape_deduce_s2t) { - auto param = 
make_shared(element::f32, Shape{}); - auto r = make_shared(param, op::Constant::create(element::u64, {1}, Shape{1}), false); + auto param = make_shared(element::f32, Shape{}); + auto r = make_shared(param, ov::op::v0::Constant::create(element::u64, {1}, Shape{1}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{1})); } TEST(type_prop, reshape_deduce_s2m) { - auto param = make_shared(element::f32, Shape{}); - auto r = make_shared(param, op::Constant::create(element::u64, {2}, Shape{1, 1}), false); + auto param = make_shared(element::f32, Shape{}); + auto r = make_shared(param, ov::op::v0::Constant::create(element::u64, {2}, Shape{1, 1}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{1, 1})); } TEST(type_prop, reshape_deduce_s2m3) { - auto param = make_shared(element::f32, Shape{}); - auto r = make_shared(param, op::Constant::create(element::u64, {3}, Shape{1, 1, 1}), false); + auto param = make_shared(element::f32, Shape{}); + auto r = + make_shared(param, ov::op::v0::Constant::create(element::u64, {3}, Shape{1, 1, 1}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{1, 1, 1})); } TEST(type_prop, reshape_deduce_2d_to_1d) { - auto param = make_shared(element::f32, Shape{3, 4}); - auto r = make_shared(param, op::Constant::create(element::u64, {1}, Shape{12}), false); + auto param = make_shared(element::f32, Shape{3, 4}); + auto r = make_shared(param, ov::op::v0::Constant::create(element::u64, {1}, Shape{12}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{12})); } TEST(type_prop, reshape_deduce_3d_to_1d) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); - auto r = make_shared(param, op::Constant::create(element::u64, {1}, Shape{60}), false); + auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto r = make_shared(param, ov::op::v0::Constant::create(element::u64, {1}, Shape{60}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{60})); } TEST(type_prop, reshape_deduce_zero_special) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); - auto r = make_shared(param, op::Constant::create(element::u64, {3}, Shape{6, 2, 0}), true); + auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto r = make_shared(param, ov::op::v0::Constant::create(element::u64, {3}, Shape{6, 2, 0}), true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{6, 2, 5})); } TEST(type_prop, reshape_deduce_wrong_output_shape) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto param = make_shared(element::f32, Shape{3, 4, 5}); try { - auto r = make_shared(param, op::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false); + auto r = + make_shared(param, ov::op::v0::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false); // Should have thrown, so fail if it didn't FAIL() << "No exception was thrown"; } catch (const NodeValidationFailure& error) { @@ -314,8 +331,9 @@ TEST(type_prop, reshape_deduce_wrong_output_shape) { // Input shape rank dynamic, so we should set the desired output shape // TEST(type_prop, reshape_partial_rank_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto r = make_shared(param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false); + auto param = make_shared(element::f32, PartialShape::dynamic()); + auto r = + make_shared(param, ov::op::v0::Constant::create(element::u64, {4}, 
Shape{3, 1, 8, 2}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2})); @@ -326,8 +344,9 @@ TEST(type_prop, reshape_partial_rank_dynamic) { // TEST(type_prop, reshape_partial_rank_static) { auto param_shape = PartialShape{Dimension::dynamic(), 6, Dimension::dynamic(), Dimension::dynamic()}; - auto param = make_shared(element::f32, param_shape); - auto r = make_shared(param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false); + auto param = make_shared(element::f32, param_shape); + auto r = + make_shared(param, ov::op::v0::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2})); @@ -339,271 +358,301 @@ TEST(type_prop, reshape_partial_rank_static) { // TEST(type_prop, reshape_partial_rank_static_dynamic_but_zero_ok) { auto param_shape = PartialShape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()}; - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto r = make_shared(param, op::Constant::create(element::u64, {4}, Shape{3, 1, 0, 2}), false); + auto param = make_shared(element::f32, PartialShape::dynamic()); + auto r = + make_shared(param, ov::op::v0::Constant::create(element::u64, {4}, Shape{3, 1, 0, 2}), false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 0, 2})); } TEST(type_prop, reshape_deduce_special_zero_shape_neg_zero) { - auto param = make_shared(element::f32, Shape{3, 1, 2}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{-1, 0}), true); + auto param = make_shared(element::f32, Shape{3, 1, 2}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{-1, 0}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{6, 1})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_neg) { - auto param = make_shared(element::f32, Shape{3, 1, 2}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, Shape{3, 1, 2}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{3, 2})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_neg_copy_input) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, Shape{3, 1}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{3, 1})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_zero_one_neg) { - auto param = make_shared(element::f32, Shape{2, 2, 3}); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), - true); + auto param = make_shared(element::f32, Shape{2, 2, 3}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_shape(), (Shape{2, 2, 
1, 3})); } TEST(type_prop, reshape_deduce_special_zero_shape_neg_zero_dynamic) { - auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 2}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{-1, 0}), true); + auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 2}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{-1, 0}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 1})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_neg_dynamic) { - auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 1}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, 1}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 1})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_zero_one_neg_dynamic) { - auto param = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), - true); + auto param = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{2, Dimension::dynamic(), 1, 3})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_neg_copy_input_dynamic) { - auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 1})); } TEST(type_prop, reshape_partial_rank_dynamic_special_zero) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{3, 1, 0, 2}), - true); + auto param = make_shared(element::f32, PartialShape::dynamic()); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{3, 1, 0, 2}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{3, 1, Dimension::dynamic(), 2})); } TEST(type_prop, reshape_partial_rank_dynamic_special_neg) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{3, -1, 0, 2}), - true); + auto param = make_shared(element::f32, PartialShape::dynamic()); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{3, -1, 0, 2}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{3, Dimension::dynamic(), Dimension::dynamic(), 2})); } TEST(type_prop, 
reshape_deduce_special_zero_shape_zero_zero_one_neg_dynamic_with_interval) { - auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3), 3}); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), - true); + auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3), 3}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{2, Dimension(1, 3), 1, 3})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_zero_one_neg_double_dynamic_with_interval) { - auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3), Dimension::dynamic()}); - auto r = make_shared(param, - op::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), - true); + auto param = + make_shared(element::f32, PartialShape{2, Dimension(1, 3), Dimension::dynamic()}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {4}, std::vector{0, 0, 1, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{2, Dimension(1, 3), 1, Dimension::dynamic()})); } TEST(type_prop, reshape_deduce_special_zero_shape_zero_neg_dynamic_with_interval) { - auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{2, Dimension(1, 3)})); } TEST(type_prop, reshape_deduce_special_zero_shape_neg_zero_dynamic_with_interval) { - auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{-1, 0}), true); + auto param = make_shared(element::f32, PartialShape{2, Dimension(1, 3)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{-1, 0}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{2, Dimension(1, 3)})); } TEST(type_prop, reshape_deduce_special_zero_shape_neg_zero_dynamic_with_interval_1) { - auto param = make_shared(element::f32, PartialShape{Dimension(1, 3), 2}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{-1, 0}), true); + auto param = make_shared(element::f32, PartialShape{Dimension(1, 3), 2}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{-1, 0}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension(1, 3), 2})); } TEST(type_prop, reshape_pass_interval_dimension_through_minus_one) { - auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 2}); - auto r = make_shared(param, - op::Constant::create(element::i64, {3}, std::vector{0, -1, 2}), - true); + auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 2}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {3}, std::vector{0, -1, 2}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, 
Dimension(1, 3), 2})); } TEST(type_prop, reshape_multiply_interval_by_defined_dim_for_minus_one) { - auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 2}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 2}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, Dimension(2, 6)})); } TEST(type_prop, reshape_multiply_interval_by_interval_for_minus_one) { - auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), Dimension(1, 6)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), Dimension(1, 6)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, Dimension(1, 18)})); } TEST(type_prop, reshape_multiply_interval_by_interval_divide_by_defined_dim_for_minus_one) { - auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 3, Dimension(1, 6)}); - auto r = make_shared(param, - op::Constant::create(element::i64, {3}, std::vector{0, -1, 3}), - true); + auto param = make_shared(element::f32, PartialShape{1, Dimension(1, 3), 3, Dimension(1, 6)}); + auto r = + make_shared(param, + ov::op::v0::Constant::create(element::i64, {3}, std::vector{0, -1, 3}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, Dimension(1, 18), 3})); } TEST(type_prop, reshape_multiply_interval_by_interval_divide_by_interval_for_minus_one) { - auto param = make_shared(element::f32, PartialShape{1, -1, Dimension(1, 6)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{1, -1, Dimension(1, 6)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic()})); } TEST(type_prop, reshape_multiply_interval_by_interval_divide_by_interval_for_minus_one_zero_included_in_input) { - auto param = make_shared(element::f32, PartialShape{1, -1, Dimension(0, 6)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {2}, std::vector{0, -1}), true); + auto param = make_shared(element::f32, PartialShape{1, -1, Dimension(0, 6)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {2}, std::vector{0, -1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic()})); } TEST(type_prop, reshape_multiply_intervals_by_interval) { - auto param = - make_shared(element::f32, PartialShape{Dimension(1, 2), Dimension(1, 3), Dimension(1, 4)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {1}, std::vector{-1}), true); + auto param = make_shared(element::f32, + PartialShape{Dimension(1, 2), Dimension(1, 3), Dimension(1, 4)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{-1}), + true); ASSERT_EQ(r->get_element_type(), 
element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension(1, 24)})); } TEST(type_prop, reshape_multiply_intervals_by_interval_zero_included) { - auto param = - make_shared(element::f32, PartialShape{Dimension(0, 2), Dimension(0, 3), Dimension(0, 4)}); - auto r = - make_shared(param, op::Constant::create(element::i64, {1}, std::vector{-1}), true); + auto param = make_shared(element::f32, + PartialShape{Dimension(0, 2), Dimension(0, 3), Dimension(0, 4)}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{-1}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_partial_shape(0), (PartialShape{Dimension(0, 24)})); } TEST(type_prop, reshape_to_zero_shape) { - auto param = make_shared(element::f32, Shape{0, 1}); - auto r = - make_shared(param, op::Constant::create(element::i64, {1}, std::vector{0}), false); + auto param = make_shared(element::f32, Shape{0, 1}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{0}), + false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_shape(0), (Shape{0})); } TEST(type_prop, reshape_to_zero_shape_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto r = - make_shared(param, op::Constant::create(element::i64, {1}, std::vector{0}), false); + auto param = make_shared(element::f32, PartialShape::dynamic()); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{0}), + false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_shape(0), (Shape{0})); } TEST(type_prop, reshape_to_zero_shape_incorrect) { - auto param = make_shared(element::f32, Shape{2, 1}); - ASSERT_THROW(const auto unused = - make_shared(param, - op::Constant::create(element::i64, {1}, std::vector{0}), - false), + auto param = make_shared(element::f32, Shape{2, 1}); + ASSERT_THROW(const auto unused = make_shared( + param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{0}), + false), std::exception); } TEST(type_prop, reshape_to_zero) { - auto param = make_shared(element::f32, Shape{2, 1}); - auto r = - make_shared(param, op::Constant::create(element::i64, {1}, std::vector{0}), true); + auto param = make_shared(element::f32, Shape{2, 1}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, std::vector{0}), + true); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_shape(0), (Shape{2})); } TEST(type_prop, reshape_to_scalar) { - auto param = make_shared(element::f32, Shape{}); - auto r = - make_shared(param, op::Constant::create(element::i64, {}, std::vector{1}), false); + auto param = make_shared(element::f32, Shape{}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {}, std::vector{1}), + false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_shape(0), (Shape{})); } TEST(type_prop, reshape_to_scalar_2) { - auto param = make_shared(element::f32, Shape{}); - auto r = - make_shared(param, op::Constant::create(element::i64, {}, std::vector{1}), false); + auto param = make_shared(element::f32, Shape{}); + auto r = make_shared(param, + ov::op::v0::Constant::create(element::i64, {}, std::vector{1}), + false); ASSERT_EQ(r->get_element_type(), element::f32); ASSERT_EQ(r->get_output_shape(0), (Shape{})); } TEST(type_prop, reshape_to_scalar_3) { - auto param = make_shared(element::f32, Shape{1, 2, 3}); - ASSERT_THROW(const auto unused = - 
make_shared(param, - op::Constant::create(element::i64, {}, std::vector{100}), - false), + auto param = make_shared(element::f32, Shape{1, 2, 3}); + ASSERT_THROW(const auto unused = make_shared( + param, + ov::op::v0::Constant::create(element::i64, {}, std::vector{100}), + false), std::exception); } TEST(type_prop, dynamic_shape_propagation_with_i32_precision) { - auto param = make_shared(element::f32, PartialShape{1, -1, -1}); + auto param = make_shared(element::f32, PartialShape{1, -1, -1}); auto shape_of = std::make_shared(param, element::i32); - auto indices = op::Constant::create(element::i32, {3}, {1, 2, 0}); - auto axis = op::Constant::create(element::i32, {1}, {0}); + auto indices = ov::op::v0::Constant::create(element::i32, {3}, {1, 2, 0}); + auto axis = ov::op::v0::Constant::create(element::i32, {1}, {0}); auto gather = std::make_shared(shape_of, indices, axis); auto reshape = std::make_shared(param, gather, true); @@ -617,9 +666,9 @@ TEST(type_prop, reshape_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector zero{0}; @@ -643,8 +692,8 @@ TEST(type_prop, reshape_label_shape_propagation_minus_one) { PartialShape initial_shape = PartialShape{marked_0, 4, 3, 1}; - auto input = std::make_shared(element::f32, initial_shape); - auto output_pattern = std::make_shared(element::i64, Shape{2}, std::vector{-1, 12}); + auto input = std::make_shared(element::f32, initial_shape); + auto output_pattern = std::make_shared(element::i64, Shape{2}, std::vector{-1, 12}); const auto reshape = std::make_shared(input, output_pattern, false); diff --git a/src/core/tests/type_prop/result.cpp b/src/core/tests/type_prop/result.cpp index 8100d14c2cdb7f..1f9f60d40f27c0 100644 --- a/src/core/tests/type_prop/result.cpp +++ b/src/core/tests/type_prop/result.cpp @@ -2,36 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/result.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset1.hpp" +#include "openvino/op/constant.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, result) { const auto arg_shape = Shape{1, 2, 3, 4, 5}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); - auto result = make_shared(arg); + auto result = make_shared(arg); EXPECT_EQ(result->get_output_element_type(0), element::f32); EXPECT_EQ(result->get_output_shape(0), arg_shape); } TEST(type_prop, result_dynamic_shape) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); + auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto result = make_shared(arg); + auto result = make_shared(arg); EXPECT_EQ(result->get_output_element_type(0), element::f32); EXPECT_TRUE(result->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, result_layout) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto result = make_shared(a); + auto a = make_shared(element::f32, PartialShape::dynamic()); + auto result = make_shared(a); result->set_layout("NHWC"); 
EXPECT_EQ(result->get_layout(), "NHWC"); result->set_layout(ov::Layout()); @@ -40,14 +40,14 @@ TEST(type_prop, result_layout) { } TEST(type_prop, result_layout_empty) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto result = make_shared(a); + auto a = make_shared(element::f32, PartialShape::dynamic()); + auto result = make_shared(a); EXPECT_TRUE(result->get_layout().empty()); } TEST(type_prop, result_layout_invalid) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto result = make_shared(a); + auto a = make_shared(element::f32, PartialShape::dynamic()); + auto result = make_shared(a); result->output(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] = "NCHW"; // incorrect way ASSERT_THROW(result->get_layout(), ov::Exception); } diff --git a/src/core/tests/type_prop/reverse.cpp b/src/core/tests/type_prop/reverse.cpp index 836407afd198fd..8d2f72ac8438cd 100644 --- a/src/core/tests/type_prop/reverse.cpp +++ b/src/core/tests/type_prop/reverse.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/reverse.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/opsets/opset10.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START - using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; class TypePropReverseV1Test : public TypePropOpTest {}; TEST(type_prop, reverse_1d_deduce) { // Deduce type - auto param = make_shared(element::f32, Shape{5}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {0}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5})); @@ -28,9 +27,10 @@ TEST(type_prop, reverse_1d_deduce) { TEST(type_prop, reverse_2d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5, 6}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {0}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); @@ -38,9 +38,10 @@ TEST(type_prop, reverse_2d_deduce_0) { TEST(type_prop, reverse_2d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5, 6}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {1}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); @@ -48,9 +49,9 @@ TEST(type_prop, reverse_2d_deduce_1) { TEST(type_prop, reverse_2d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); + auto param = make_shared(element::f32, Shape{5, 6}); auto rev = make_shared(param, - op::Constant::create(element::i64, {2}, {0, 1}), + ov::op::v0::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -59,9 +60,10 @@ TEST(type_prop, reverse_2d_deduce_01) { 
TEST(type_prop, reverse_3d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {0}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); @@ -69,9 +71,10 @@ TEST(type_prop, reverse_3d_deduce_0) { TEST(type_prop, reverse_3d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {1}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); @@ -79,9 +82,10 @@ TEST(type_prop, reverse_3d_deduce_1) { TEST(type_prop, reverse_3d_deduce_2) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = - make_shared(param, op::Constant::create(element::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + ov::op::v0::Constant::create(element::i64, {1}, {2}), + op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); @@ -89,9 +93,9 @@ TEST(type_prop, reverse_3d_deduce_2) { TEST(type_prop, reverse_3d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::f32, Shape{5, 6, 7}); auto rev = make_shared(param, - op::Constant::create(element::i64, {2}, {0, 1}), + ov::op::v0::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -100,9 +104,9 @@ TEST(type_prop, reverse_3d_deduce_01) { TEST(type_prop, reverse_3d_deduce_02) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::f32, Shape{5, 6, 7}); auto rev = make_shared(param, - op::Constant::create(element::i64, {2}, {0, 2}), + ov::op::v0::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -111,9 +115,9 @@ TEST(type_prop, reverse_3d_deduce_02) { TEST(type_prop, reverse_3d_deduce_12) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::f32, Shape{5, 6, 7}); auto rev = make_shared(param, - op::Constant::create(element::i64, {2}, {1, 2}), + ov::op::v0::Constant::create(element::i64, {2}, {1, 2}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -122,9 +126,9 @@ TEST(type_prop, reverse_3d_deduce_12) { TEST(type_prop, reverse_3d_deduce_012) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::f32, Shape{5, 6, 7}); auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 1, 2}), + ov::op::v0::Constant::create(element::i64, {3}, {0, 1, 2}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -133,10 +137,10 @@ TEST(type_prop, reverse_3d_deduce_012) { TEST(type_prop, reverse_3d_deduce_oob) { // Deduce type - auto 
param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::f32, Shape{5, 6, 7}); try { auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 3, 2}), + ov::op::v0::Constant::create(element::i64, {3}, {0, 3, 2}), op::v1::Reverse::Mode::INDEX); // Should have thrown, so fail if it didn't @@ -154,9 +158,9 @@ TEST(type_prop, reverse_3d_deduce_oob) { // If the input rank is dynamic, we should pass unconditionally. // TEST(type_prop, reverse_partial_rank_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); + auto param = make_shared(element::f32, PartialShape::dynamic()); auto rev = make_shared(param, - op::Constant::create(element::i64, {4}, {0, 2, 1776, 90909}), + ov::op::v0::Constant::create(element::i64, {4}, {0, 2, 1776, 90909}), op::v1::Reverse::Mode::INDEX); EXPECT_EQ(rev->get_element_type(), element::f32); @@ -182,7 +186,7 @@ TEST_F(TypePropReverseV1Test, partial_rank_static_dynamic_axes_ok) { TEST_F(TypePropReverseV1Test, axes_index_is_not_1d_tensor) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto axes = make_shared(element::i64, PartialShape{2, 3}); OV_EXPECT_THROW(auto op = make_op(param, axes, op::v1::Reverse::Mode::INDEX), @@ -192,7 +196,7 @@ TEST_F(TypePropReverseV1Test, axes_index_is_not_1d_tensor) { TEST_F(TypePropReverseV1Test, axes_mask_is_not_1d_tensor) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto axes = make_shared(element::boolean, PartialShape{2, 3}); OV_EXPECT_THROW(auto op = make_op(param, axes, op::v1::Reverse::Mode::MASK), @@ -202,7 +206,7 @@ TEST_F(TypePropReverseV1Test, axes_mask_is_not_1d_tensor) { TEST_F(TypePropReverseV1Test, axes_mask_length_lt_input_rank) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto axes = make_shared(element::boolean, PartialShape{2}); OV_EXPECT_THROW( @@ -213,7 +217,7 @@ TEST_F(TypePropReverseV1Test, axes_mask_length_lt_input_rank) { TEST_F(TypePropReverseV1Test, axes_mask_length_gt_input_rank) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto axes = make_shared(element::boolean, PartialShape{5}); OV_EXPECT_THROW( @@ -264,7 +268,7 @@ TEST_F(TypePropReverseV1Test, axes_index_not_integer_type) { TEST_F(TypePropReverseV1Test, param_static_rank_partial_shape_axes_out_of_input_rank) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); OV_EXPECT_THROW( auto op = make_op(param, Constant::create(element::i64, {3}, {0, 4, 2}), op::v1::Reverse::Mode::INDEX), @@ -275,7 +279,7 @@ TEST_F(TypePropReverseV1Test, param_static_rank_partial_shape_axes_out_of_input_ TEST_F(TypePropReverseV1Test, param_static_rank_partial_shape_axes_negatives) { PartialShape param_shape{-1, {2, -1}, {-1, 3}, 5}; set_shape_labels(param_shape, 10); - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto op = make_op(param, 
Constant::create(element::i64, {3}, {0, -1, 2}), op::v1::Reverse::Mode::INDEX); @@ -286,7 +290,7 @@ TEST_F(TypePropReverseV1Test, param_static_rank_partial_shape_axes_negatives) { TEST_F(TypePropReverseV1Test, more_axes_index_than_input_rank) { PartialShape param_shape{-1, {2, -1}, {-1, 3}, 5}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::f32, param_shape); auto op = make_op(param, Constant::create(element::i64, {7}, {0, -1, 1, 2, 3, 3, 2}), op::v1::Reverse::Mode::INDEX); diff --git a/src/core/tests/type_prop/rnn_cell.cpp b/src/core/tests/type_prop/rnn_cell.cpp index 311cfe43e83062..5243832c366bf9 100644 --- a/src/core/tests/type_prop/rnn_cell.cpp +++ b/src/core/tests/type_prop/rnn_cell.cpp @@ -3,12 +3,10 @@ // #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "openvino/opsets/opset4.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, rnn_cell) { const size_t batch_size = 2; diff --git a/src/core/tests/type_prop/rnn_cell_base.cpp b/src/core/tests/type_prop/rnn_cell_base.cpp index b4718c067272ca..1387379976b7a9 100644 --- a/src/core/tests/type_prop/rnn_cell_base.cpp +++ b/src/core/tests/type_prop/rnn_cell_base.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" #include "openvino/openvino.hpp" #include "openvino/opsets/opset12.hpp" diff --git a/src/core/tests/type_prop/rnn_sequence.cpp b/src/core/tests/type_prop/rnn_sequence.cpp index d27496503a84f3..318d5840fb40b1 100644 --- a/src/core/tests/type_prop/rnn_sequence.cpp +++ b/src/core/tests/type_prop/rnn_sequence.cpp @@ -4,11 +4,10 @@ #include "common_test_utils/type_prop.hpp" #include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/opsets/opset5.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, rnn_sequence_forward) { const size_t batch_size = 8; @@ -20,7 +19,7 @@ TEST(type_prop, rnn_sequence_forward) { const auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); const auto W = make_shared(element::f32, Shape{num_directions, hidden_size, input_size}); const auto R = make_shared(element::f32, Shape{num_directions, hidden_size, hidden_size}); @@ -53,7 +52,7 @@ TEST(type_prop, rnn_sequence_invalid_input) { auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); auto H_t = make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); auto W = make_shared(element::f32, Shape{num_directions, hidden_size, input_size}); auto R = make_shared(element::f32, Shape{num_directions, hidden_size, hidden_size}); @@ -144,7 +143,7 @@ TEST(type_prop, rnn_sequence_dynamic_inputs) { const auto X = make_shared(element::f32, PartialShape{batch_size, seq_length, input_size}); const auto H_t = make_shared(element::f32, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, 
PartialShape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); const auto W = make_shared(element::f32, PartialShape{num_directions, hidden_size, input_size}); const auto R = make_shared(element::f32, PartialShape{num_directions, hidden_size, hidden_size}); @@ -172,7 +171,7 @@ TEST(type_prop, rnn_sequence_dynamic_batch_size) { const auto X = make_shared(element::f32, PartialShape{batch_size, seq_length, input_size}); const auto H_t = make_shared(element::f32, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); const auto W = make_shared(element::f32, PartialShape{num_directions, hidden_size, input_size}); const auto R = make_shared(element::f32, PartialShape{num_directions, hidden_size, hidden_size}); @@ -200,7 +199,7 @@ TEST(type_prop, rnn_sequence_dynamic_input_size) { const auto X = make_shared(element::f32, PartialShape{batch_size, seq_length, input_size}); const auto H_t = make_shared(element::f32, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); const auto W = make_shared(element::f32, PartialShape{num_directions, hidden_size, input_size}); const auto R = make_shared(element::f32, PartialShape{num_directions, hidden_size, hidden_size}); @@ -228,7 +227,7 @@ TEST(type_prop, rnn_sequence_dynamic_hidden_size) { const auto X = make_shared(element::f32, PartialShape{batch_size, seq_length, input_size}); const auto H_t = make_shared(element::f32, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); const auto W = make_shared(element::f32, PartialShape{num_directions, hidden_size, input_size}); const auto R = make_shared(element::f32, PartialShape{num_directions, hidden_size, hidden_size}); @@ -255,7 +254,7 @@ TEST(type_prop, rnn_sequence_dynamic_invalid_input_rank0) { auto X = make_shared(element::f32, Shape{batch_size, seq_length, input_size}); auto H_t = make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); auto W = make_shared(element::f32, Shape{num_directions, hidden_size, input_size}); auto R = make_shared(element::f32, Shape{num_directions, hidden_size, hidden_size}); @@ -312,7 +311,7 @@ TEST(type_prop, rnn_sequence_input_dynamic_rank) { auto X = make_shared(element::f32, PartialShape{batch_size, seq_length, input_size}); auto H_t = make_shared(element::f32, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); + const auto sequence_lengths = make_shared(element::i32, PartialShape{batch_size}); auto W = make_shared(element::f32, PartialShape{num_directions, hidden_size, input_size}); auto R = make_shared(element::f32, PartialShape{num_directions, hidden_size, hidden_size}); diff --git a/src/core/tests/type_prop/round.cpp b/src/core/tests/type_prop/round.cpp index 87d2697f92345a..fefb89a7e0aa0d 100644 --- a/src/core/tests/type_prop/round.cpp +++ 
b/src/core/tests/type_prop/round.cpp
@@ -2,53 +2,56 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/round.hpp"
+
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/op/parameter.hpp"
 
 using namespace std;
-using namespace ngraph;
+using namespace ov;
 
 TEST(type_prop, rounding_to_even) {
-    auto data = make_shared(element::f32, Shape{1, 3, 6});
+    auto data = make_shared(element::f32, Shape{1, 3, 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN);
     EXPECT_EQ(round_func->get_element_type(), element::f32);
     EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6}));
 }
 
 TEST(type_prop, rounding_away) {
-    auto data = make_shared(element::f32, Shape{1, 3, 6});
+    auto data = make_shared(element::f32, Shape{1, 3, 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
     EXPECT_EQ(round_func->get_element_type(), element::f32);
     EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6}));
 }
 
 TEST(type_prop, rounding_to_even_partial) {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN);
     EXPECT_EQ(round_func->get_element_type(), element::f32);
     ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6})));
 
     // rank unknown
-    auto round_partial = make_shared(make_shared(element::f32, PartialShape::dynamic()),
-                                     op::v5::Round::RoundMode::HALF_TO_EVEN);
+    auto round_partial =
+        make_shared(make_shared(element::f32, PartialShape::dynamic()),
+                    op::v5::Round::RoundMode::HALF_TO_EVEN);
     ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 }
 
 TEST(type_prop, rounding_away_partial) {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
     EXPECT_EQ(round_func->get_element_type(), element::f32);
     ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6})));
 
     // rank unknown
-    auto round_partial = make_shared(make_shared(element::f32, PartialShape::dynamic()),
-                                     op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
+    auto round_partial =
+        make_shared(make_shared(element::f32, PartialShape::dynamic()),
+                    op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
     ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 }
 
 TEST(type_prop, rounding_to_even_partial_static_rank) {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN);
     EXPECT_EQ(round_func->get_element_type(), element::f32);
     ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6})));
@@ -56,7 +59,7 @@ TEST(type_prop, rounding_to_even_partial_static_rank) {
 }
 
 TEST(type_prop, rounding_away_partial_static_rank) {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO);
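[Editor's note on the hunks in this patch: the angle-bracket template arguments of make_shared and Constant::create did not survive in this copy of the diff, so the removed and added lines often look identical. The sketch below is not literal patch content; it illustrates, under stated assumptions, what the migrated tests presumably look like. The concrete op types (ov::op::v0::Parameter, ov::op::v5::Round, ov::op::v1::Reverse, ov::op::v0::Constant) are inferred from the visible includes and constructor arguments, not copied from the diff.]

#include <gtest/gtest.h>

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reverse.hpp"
#include "openvino/op/round.hpp"

using namespace ov;

// Unary-style check, as in the round.cpp hunks above: element type and the
// static shape are expected to pass through the op unchanged.
TEST(type_prop_sketch, round_half_to_even) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 3, 6});
    const auto round = std::make_shared<op::v5::Round>(data, op::v5::Round::RoundMode::HALF_TO_EVEN);
    EXPECT_EQ(round->get_element_type(), element::f32);
    EXPECT_EQ(round->get_shape(), (Shape{1, 3, 6}));
}

// Constant-fed axes, as in the reverse.cpp hunks earlier in the patch: the
// axes input is now built with the fully qualified ov::op::v0::Constant
// instead of the old ngraph op::Constant alias.
TEST(type_prop_sketch, reverse_static_shape) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{5, 6, 7});
    const auto axes = op::v0::Constant::create(element::i64, Shape{1}, {0});
    const auto reverse = std::make_shared<op::v1::Reverse>(data, axes, op::v1::Reverse::Mode::INDEX);
    EXPECT_EQ(reverse->get_element_type(), element::f32);
    EXPECT_EQ(reverse->get_shape(), (Shape{5, 6, 7}));
}

[End of editor's note; the patch continues below.]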
EXPECT_EQ(round_func->get_element_type(), element::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); diff --git a/src/core/tests/type_prop/scatter_elements_update.cpp b/src/core/tests/type_prop/scatter_elements_update.cpp index 7d44b8fc84f4de..99cff60d6162e7 100644 --- a/src/core/tests/type_prop/scatter_elements_update.cpp +++ b/src/core/tests/type_prop/scatter_elements_update.cpp @@ -2,10 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/scatter_elements_update.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" using namespace std; using namespace ov; @@ -191,11 +194,11 @@ TEST(type_prop, scatter_elements_update_mean_reduction_of_bool) { const auto axis = make_shared(element::i32, Shape{1}, std::vector{0}); OV_EXPECT_THROW( - std::ignore = make_shared(data, - indices, - updates, - axis, - op::v12::ScatterElementsUpdate::Reduction::MEAN), + std::ignore = make_shared(data, + indices, + updates, + axis, + op::v12::ScatterElementsUpdate::Reduction::MEAN), NodeValidationFailure, HasSubstr("The 'mean' reduction type is not supported for boolean tensors")); } diff --git a/src/core/tests/type_prop/scatter_nd_update.cpp b/src/core/tests/type_prop/scatter_nd_update.cpp index 0cd989c23e1062..d7e6e5713cf1ac 100644 --- a/src/core/tests/type_prop/scatter_nd_update.cpp +++ b/src/core/tests/type_prop/scatter_nd_update.cpp @@ -2,21 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/scatter_nd_update.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/opsets/opset10.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, scatter_nd_update_v3_fail_indices_element_type) { Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::f16, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::f32, ref_shape); + auto I = make_shared(element::f16, indices_shape); + auto U = make_shared(element::f32, updates_shape); try { auto G = make_shared(R, I, U); // Should have thrown, so fail if it didn't @@ -33,11 +34,11 @@ TEST(type_prop, scatter_nd_update_v3_fail_updates_rank) { Shape indices_shape{1}; Shape updates_shape{3, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::f32, ref_shape); + auto I = make_shared(element::i32, indices_shape); + auto U = make_shared(element::f32, updates_shape); try { - auto G = make_shared(R, I, U); + auto G = make_shared(R, I, U); // Should have thrown, so fail if it didn't FAIL() << "Incorrect updates rank"; } catch (const NodeValidationFailure& error) { @@ -54,11 +55,11 @@ TEST(type_prop, scatter_nd_update_fail_updates_element_type) { Shape indices_shape{1}; Shape updates_shape{3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::i32, updates_shape); + auto R = make_shared(element::f32, 
ref_shape); + auto I = make_shared(element::i32, indices_shape); + auto U = make_shared(element::i32, updates_shape); try { - auto G = make_shared(R, I, U); + auto G = make_shared(R, I, U); // Should have thrown, so fail if it didn't FAIL() << "Created ScatterND op with incorrect updates element type."; } catch (const NodeValidationFailure& error) { @@ -73,11 +74,11 @@ TEST(type_prop, scatter_nd_update_fail_updates_shape) { Shape indices_shape{1}; Shape updates_shape{2, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::f32, ref_shape); + auto I = make_shared(element::i32, indices_shape); + auto U = make_shared(element::f32, updates_shape); try { - auto G = make_shared(R, I, U); + auto G = make_shared(R, I, U); // Should have thrown, so fail if it didn't FAIL() << "Incorrect updates shape"; } catch (const NodeValidationFailure& error) { @@ -94,11 +95,11 @@ TEST(type_prop, scatter_nd_update_fail_indices_last_dim) { Shape indices_shape{2, 4}; Shape updates_shape{2, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::f32, ref_shape); + auto I = make_shared(element::i32, indices_shape); + auto U = make_shared(element::f32, updates_shape); try { - auto G = make_shared(R, I, U); + auto G = make_shared(R, I, U); // Should have thrown, so fail if it didn't FAIL() << "Incorrect indices innermost dim"; } catch (const NodeValidationFailure& error) { @@ -200,10 +201,10 @@ TEST_F(TypePropScatterUpdateNDV3Test, preserve_partial_values_and_labels_via_eva auto u_shape = PartialShape{{10, 20}, {3, 4}}; set_shape_labels(u_shape, 20); - const auto shape_of_u = std::make_shared(std::make_shared(element::i64, u_shape)); + const auto shape_of_u = std::make_shared(std::make_shared(element::i64, u_shape)); const auto op = make_op(d, i, shape_of_u); - auto param = std::make_shared(element::f32, PartialShape{1}); + auto param = std::make_shared(element::f32, PartialShape{1}); auto bc = std::make_shared(param, op, op::BroadcastType::BIDIRECTIONAL); EXPECT_EQ(bc->get_output_partial_shape(0), PartialShape({{3, 4}, 3, {10, 20}, 4})); diff --git a/src/core/tests/type_prop/scatter_update.cpp b/src/core/tests/type_prop/scatter_update.cpp index 9618db62e81a8e..d5ad0043aabdee 100644 --- a/src/core/tests/type_prop/scatter_update.cpp +++ b/src/core/tests/type_prop/scatter_update.cpp @@ -2,14 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/scatter_update.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/strided_slice.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; namespace { @@ -18,10 +23,10 @@ void type_check(const type& refType) { Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(refType, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(refType, updates_shape); - auto A = 
op::Constant::create(element::i32, Shape{1}, {1}); + auto R = make_shared(refType, ref_shape); + auto I = make_shared(element::i32, indices_shape); + auto U = make_shared(refType, updates_shape); + auto A = ov::op::v0::Constant::create(element::i32, Shape{1}, {1}); auto scatter_update = make_shared(R, I, U, A); EXPECT_EQ(scatter_update->get_output_element_type(0), refType); EXPECT_EQ(scatter_update->get_output_shape(0), ref_shape); @@ -35,10 +40,10 @@ void incorrect_type_check(const type& refType, Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(refType, ref_shape); - auto I = make_shared(indicesType, indices_shape); - auto U = make_shared(updatesType, updates_shape); - auto A = op::Constant::create(axisType, Shape{1}, {1}); + auto R = make_shared(refType, ref_shape); + auto I = make_shared(indicesType, indices_shape); + auto U = make_shared(updatesType, updates_shape); + auto A = ov::op::v0::Constant::create(axisType, Shape{1}, {1}); try { auto G = make_shared(R, I, U, A); // Should have thrown, so fail if it didn't @@ -59,10 +64,10 @@ void incorrect_shape_check(const Shape& refShape, Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, refShape); - auto I = make_shared(element::i32, indicesShape); - auto U = make_shared(element::f32, updatesShape); - auto A = op::Constant::create(element::i32, axisShape, {axisVal}); + auto R = make_shared(element::f32, refShape); + auto I = make_shared(element::i32, indicesShape); + auto U = make_shared(element::f32, updatesShape); + auto A = ov::op::v0::Constant::create(element::i32, axisShape, {axisVal}); try { auto G = make_shared(R, I, U, A); // Should have thrown, so fail if it didn't @@ -201,10 +206,10 @@ TEST(type_prop, scatter_update_v3_dynamic_data_shape) { PartialShape ref_shape = PartialShape::dynamic(); Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::i8, ref_shape); + auto I = make_shared(element::i16, indices_shape); + auto U = make_shared(element::i8, updates_shape); + auto A = ov::op::v0::Constant::create(element::i16, Shape{}, {1}); auto scatter_update = make_shared(R, I, U, A); EXPECT_EQ(scatter_update->get_output_element_type(0), element::i8); @@ -219,10 +224,10 @@ TEST(type_prop, scatter_update_v3_interval_label_data_shape) { Shape indices_shape{2, 1}; Shape updates_shape{3, 2, 1, 2, 4}; - auto data = make_shared(element::f32, data_shape); - auto idx = make_shared(element::i32, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = op::Constant::create(element::i32, Shape{}, {1}); + auto data = make_shared(element::f32, data_shape); + auto idx = make_shared(element::i32, indices_shape); + auto updates = make_shared(element::f32, updates_shape); + auto axis = ov::op::v0::Constant::create(element::i32, Shape{}, {1}); auto scatter_update = make_shared(data, idx, updates, axis); @@ -238,14 +243,15 @@ TEST(type_prop, scatter_update_v3_value_label_propagation) { ov::DimensionTracker::set_label(labeled_dim, label); PartialShape data_shape = PartialShape{labeled_dim}; - auto data = make_shared(element::i8, data_shape); + auto data = make_shared(element::i8, data_shape); auto shape_of = make_shared(data); - auto scatter_update = 
make_shared(op::Constant::create(element::i64, Shape{2}, {1, 0}), - op::Constant::create(element::i64, Shape{1}, {1}), - shape_of, - op::Constant::create(element::i64, Shape{1}, {0})); + auto scatter_update = + make_shared(ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 0}), + ov::op::v0::Constant::create(element::i64, Shape{1}, {1}), + shape_of, + ov::op::v0::Constant::create(element::i64, Shape{1}, {0})); auto broadcast = - make_shared(op::Constant::create(element::i64, Shape{1, 1}, {4}), scatter_update); + make_shared(ov::op::v0::Constant::create(element::i64, Shape{1, 1}, {4}), scatter_update); const auto& output_shape = broadcast->get_output_partial_shape(0); EXPECT_EQ(output_shape, PartialShape({1, {5, 7}})); @@ -255,18 +261,19 @@ TEST(type_prop, scatter_update_v3_value_label_propagation) { TEST(type_prop, scatter_update_v3_partial_value_propagation) { // strided slice should take from 5 to 7 elements from the 10 elements in the input data - auto input = make_shared(element::i8, PartialShape{ov::Dimension(5, 7)}); + auto input = make_shared(element::i8, PartialShape{ov::Dimension(5, 7)}); auto shape = make_shared(input); - auto scatter_update = make_shared(op::Constant::create(element::i64, Shape{2}, {1, 0}), - op::Constant::create(element::i64, Shape{1}, {1}), - shape, - op::Constant::create(element::i64, Shape{1}, {0})); + auto scatter_update = + make_shared(ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 0}), + ov::op::v0::Constant::create(element::i64, Shape{1}, {1}), + shape, + ov::op::v0::Constant::create(element::i64, Shape{1}, {0})); const auto& masks = std::vector(0, 2); const auto& strided_slice = make_shared( - op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}), - op::Constant::create(element::i64, Shape{2}, {0, 0}), + ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}), + ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 0}), scatter_update, - op::Constant::create(element::i64, Shape{2}, {1, 1}), + ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}), masks, masks); diff --git a/src/core/tests/type_prop/select.cpp b/src/core/tests/type_prop/select.cpp index 6005561444019d..c2e3ec86639f48 100644 --- a/src/core/tests/type_prop/select.cpp +++ b/src/core/tests/type_prop/select.cpp @@ -2,29 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/select.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START - using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, select_deduce) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); ASSERT_EQ(bc->get_element_type(), element::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 4})); } TEST(type_prop, select_default_constructor) { - auto cond_param = make_shared(element::boolean, Shape{2, 4}); - auto then_param = make_shared(element::f32, Shape{2, 4}); - auto else_param = make_shared(element::f32, Shape{2, 4}); + auto cond_param = 
make_shared(element::boolean, Shape{2, 4}); + auto then_param = make_shared(element::f32, Shape{2, 4}); + auto else_param = make_shared(element::f32, Shape{2, 4}); auto op = make_shared(); EXPECT_EQ(op->get_auto_broadcast().m_type, op::AutoBroadcastType::NUMPY); @@ -47,9 +45,9 @@ TEST(type_prop, select_labels_cond_numpy) { set_shape_labels(labeled_shape, 10); ov::TensorLabel expected_labels{10, 11, 12, ov::no_label, 14}; - auto cond_param = make_shared(element::boolean, labeled_shape); - auto then_param = make_shared(element::f32, PartialShape::dynamic(5)); - auto else_param = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); + auto cond_param = make_shared(element::boolean, labeled_shape); + auto then_param = make_shared(element::f32, PartialShape::dynamic(5)); + auto else_param = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); auto op = make_shared(cond_param, then_param, else_param); const auto& out_shape = op->get_output_partial_shape(0); @@ -65,9 +63,9 @@ TEST(type_prop, select_labels_then_numpy) { ov::TensorLabel expected_labels{ov::no_label, ov::no_label, 12, ov::no_label, 14}; auto cond_param = - make_shared(element::boolean, PartialShape{{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}); - auto then_param = make_shared(element::f32, labeled_shape); - auto else_param = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); + make_shared(element::boolean, PartialShape{{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}); + auto then_param = make_shared(element::f32, labeled_shape); + auto else_param = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); auto op = make_shared(cond_param, then_param, else_param); const auto& out_shape = op->get_output_partial_shape(0); @@ -84,9 +82,9 @@ TEST(type_prop, select_labels_else_numpy) { ov::TensorLabel expected_labels{ov::no_label, ov::no_label, 11, 12, 13}; auto cond_param = - make_shared(element::boolean, PartialShape{{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}); - auto then_param = make_shared(element::f32, PartialShape::dynamic(5)); - auto else_param = make_shared(element::f32, labeled_shape); + make_shared(element::boolean, PartialShape{{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}); + auto then_param = make_shared(element::f32, PartialShape::dynamic(5)); + auto else_param = make_shared(element::f32, labeled_shape); auto op = make_shared(cond_param, then_param, else_param); const auto& out_shape = op->get_output_partial_shape(0); @@ -107,9 +105,9 @@ TEST(type_prop, select_labels_all_params_numpy) { ov::TensorLabel expected_labels{10, 11, 22, 13, 34, 15, 26, 17, 18}; - auto cond_param = make_shared(element::boolean, labeled_shape_cond); - auto then_param = make_shared(element::f32, labeled_shape_then); - auto else_param = make_shared(element::f32, labeled_shape_else); + auto cond_param = make_shared(element::boolean, labeled_shape_cond); + auto then_param = make_shared(element::f32, labeled_shape_then); + auto else_param = make_shared(element::f32, labeled_shape_else); auto op = make_shared(cond_param, then_param, else_param); const auto& out_shape = op->get_output_partial_shape(0); @@ -130,9 +128,9 @@ TEST(type_prop, select_labels_all_params_none) { ov::TensorLabel expected_labels{10, 11, 12, 13, 14, 15, 16, 17, 18}; - auto cond_param = make_shared(element::boolean, labeled_shape_cond); - auto then_param = make_shared(element::f32, labeled_shape_then); - auto else_param = make_shared(element::f32, labeled_shape_else); + auto cond_param = make_shared(element::boolean, 
labeled_shape_cond); + auto then_param = make_shared(element::f32, labeled_shape_then); + auto else_param = make_shared(element::f32, labeled_shape_else); auto op = make_shared(cond_param, then_param, else_param, op::AutoBroadcastType::NONE); const auto& out_shape = op->get_output_partial_shape(0); @@ -153,9 +151,9 @@ TEST(type_prop, select_labels_all_params_pdpd) { ov::TensorLabel expected_labels{10, 11, 22, 33, 24, 25, 26, 17, 18}; - auto cond_param = make_shared(element::boolean, labeled_shape_cond); - auto then_param = make_shared(element::f32, labeled_shape_then); - auto else_param = make_shared(element::f32, labeled_shape_else); + auto cond_param = make_shared(element::boolean, labeled_shape_cond); + auto then_param = make_shared(element::f32, labeled_shape_then); + auto else_param = make_shared(element::f32, labeled_shape_else); auto op = make_shared(cond_param, then_param, else_param, @@ -170,18 +168,18 @@ TEST(type_prop, select_labels_all_params_pdpd) { TEST(type_prop, select_dynamic) { auto param_0 = - make_shared(element::boolean, PartialShape({{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}})); - auto param_1 = make_shared(element::f32, PartialShape::dynamic(5)); - auto param_2 = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); + make_shared(element::boolean, PartialShape({{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}})); + auto param_1 = make_shared(element::f32, PartialShape::dynamic(5)); + auto param_2 = make_shared(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}})); auto bc = make_shared(param_0, param_1, param_2); ASSERT_EQ(bc->get_element_type(), element::f32); ASSERT_EQ(bc->get_output_partial_shape(0), PartialShape({{2, 8}, {3, 7}, -1, 5, -1})); } TEST(type_prop, select_shape_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{3, 5}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{3, 5}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); // Should have thrown, so fail if it didn't @@ -194,9 +192,9 @@ TEST(type_prop, select_shape_mismatch_a) { } TEST(type_prop, select_shape_mismatch_b) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{3, 5}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{3, 5}); + auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); // Should have thrown, so fail if it didn't @@ -209,9 +207,9 @@ TEST(type_prop, select_shape_mismatch_b) { } TEST(type_prop, select_shape_mismatch_c) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{3, 5}); + auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::f32, Shape{3, 5}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); // Should have thrown, so fail if it didn't @@ -224,9 +222,9 
@@ TEST(type_prop, select_shape_mismatch_c) { } TEST(type_prop, select_elem_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); // Should have thrown, so fail if it didn't @@ -239,9 +237,9 @@ TEST(type_prop, select_elem_mismatch_a) { } TEST(type_prop, select_elem_mismatch_bc) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); // Should have thrown, so fail if it didn't @@ -254,9 +252,9 @@ TEST(type_prop, select_elem_mismatch_bc) { } TEST(type_prop, select_partial_all_rank_dynamic) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::boolean, PartialShape::dynamic()); + auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); @@ -265,9 +263,9 @@ TEST(type_prop, select_partial_all_rank_dynamic) { } TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mismatch) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::i32, PartialShape::dynamic()); + auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::i32, PartialShape::dynamic()); try { auto sel = make_shared(param0, param1, param2); @@ -281,9 +279,9 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mis } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); @@ -292,9 +290,9 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_et_dynamic) { } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); + auto 
param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); @@ -303,9 +301,9 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg2_et_dynamic) { } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); @@ -314,10 +312,12 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_arg2_et_dynamic) { } TEST(type_prop, select_partial_all_rank_static_dynamic_ok) { - auto param0 = - make_shared(element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); - auto param1 = make_shared(element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); - auto param2 = make_shared(element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); + auto param0 = make_shared(element::boolean, + PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + auto param1 = + make_shared(element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + auto param2 = + make_shared(element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); auto sel = make_shared(param0, param1, param2); @@ -327,10 +327,11 @@ TEST(type_prop, select_partial_all_rank_static_dynamic_ok) { } TEST(type_prop, select_partial_all_rank_static_intransitive_incompatibility) { - auto param0 = - make_shared(element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); - auto param1 = make_shared(element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); - auto param2 = make_shared(element::f32, PartialShape{3, Dimension::dynamic(), 3}); + auto param0 = make_shared(element::boolean, + PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + auto param1 = + make_shared(element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + auto param2 = make_shared(element::f32, PartialShape{3, Dimension::dynamic(), 3}); try { auto sel = make_shared(param0, param1, param2); @@ -362,9 +363,9 @@ struct DeduceV1SelectTest : ::testing::TestWithParam {}; TEST_P(DeduceV1SelectTest, output_shape) { auto tp = GetParam(); - auto cond = make_shared(tp.ets[0], tp.shapes[0]); - auto ptrue = make_shared(tp.ets[1], tp.shapes[1]); - auto pfalse = make_shared(tp.ets[2], tp.shapes[2]); + auto cond = make_shared(tp.ets[0], tp.shapes[0]); + auto ptrue = make_shared(tp.ets[1], tp.shapes[1]); + auto pfalse = make_shared(tp.ets[2], tp.shapes[2]); auto select = make_shared(cond, ptrue, pfalse, tp.auto_broadcast); ASSERT_EQ(select->get_shape(), tp.shapes[3]); @@ -411,27 +412,27 @@ INSTANTIATE_TEST_SUITE_P( PrintToDummyParamName()); TEST(type_prop, select_v1_partial_shape) { - auto a = make_shared(element::boolean, PartialShape::dynamic()); - auto b = make_shared(element::f32, Shape{2, 4}); - auto c = make_shared(element::f32, Shape{2, 4}); + auto a = make_shared(element::boolean, PartialShape::dynamic()); + auto b = make_shared(element::f32, Shape{2, 4}); + auto c = make_shared(element::f32, Shape{2, 4}); auto select = 
make_shared(a, b, c, op::AutoBroadcastType::NONE); ASSERT_EQ(select->get_shape(), (Shape{2, 4})); } TEST(type_prop, select_v1_partial_shape_autob) { - auto a = make_shared(element::boolean, PartialShape{Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto c = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); + auto a = make_shared(element::boolean, PartialShape{Dimension::dynamic()}); + auto b = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto c = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); auto select = make_shared(a, b, c); ASSERT_TRUE(select->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic()})); } TEST(type_prop, select_v1_wrong_et) { - auto param0 = make_shared(element::i8, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::i8, Shape{2, 4}); + auto param1 = make_shared(element::f32, Shape{2, 4}); + auto param2 = make_shared(element::f32, Shape{2, 4}); try { auto sel = make_shared(param0, param1, param2); @@ -444,9 +445,9 @@ TEST(type_prop, select_v1_wrong_et) { } TEST(type_prop, select_v1_et_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::i8, Shape{2, 4}); + auto param0 = make_shared(element::boolean, Shape{2, 4}); + auto param1 = make_shared(element::f32, Shape{2, 4}); + auto param2 = make_shared(element::i8, Shape{2, 4}); try { auto sel = make_shared(param0, param1, param2); @@ -459,9 +460,9 @@ TEST(type_prop, select_v1_et_mismatch) { } TEST(type_prop, select_v1_shape_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 3}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::boolean, Shape{2, 4}); + auto param1 = make_shared(element::f32, Shape{2, 3}); + auto param2 = make_shared(element::f32, Shape{2, 4}); try { auto sel = make_shared(param0, param1, param2); @@ -474,9 +475,9 @@ TEST(type_prop, select_v1_shape_mismatch) { } TEST(type_prop, select_v1_partial_shape_mismatch) { - auto param0 = make_shared(element::boolean, PartialShape{3, Dimension::dynamic()}); - auto param1 = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::boolean, PartialShape{3, Dimension::dynamic()}); + auto param1 = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); + auto param2 = make_shared(element::f32, Shape{2, 4}); try { auto sel = make_shared(param0, param1, param2); diff --git a/src/core/tests/type_prop/selu.cpp b/src/core/tests/type_prop/selu.cpp index e8379aee3c4e20..2f98b1526c2074 100644 --- a/src/core/tests/type_prop/selu.cpp +++ b/src/core/tests/type_prop/selu.cpp @@ -2,48 +2,48 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/selu.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, selu_basic_inference_f32_3D) { - const auto param = make_shared(element::f32, Shape{1, 32, 32}); - const auto alpha = make_shared(element::f32, Shape{1}); - const auto lambda = make_shared(element::f32, Shape{1}); - const auto selu = make_shared(param, alpha, lambda); + const 
auto param = make_shared(element::f32, Shape{1, 32, 32}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); ASSERT_EQ(selu->get_element_type(), element::f32); ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32})); } TEST(type_prop, selu_basic_inference_f16_3D) { - const auto param = make_shared(element::f16, Shape{1, 32, 32}); - const auto alpha = make_shared(element::f16, Shape{1}); - const auto lambda = make_shared(element::f16, Shape{1}); - const auto selu = make_shared(param, alpha, lambda); + const auto param = make_shared(element::f16, Shape{1, 32, 32}); + const auto alpha = make_shared(element::f16, Shape{1}); + const auto lambda = make_shared(element::f16, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); ASSERT_EQ(selu->get_element_type(), element::f16); ASSERT_EQ(selu->get_shape(), (Shape{1, 32, 32})); } TEST(type_prop, selu_basic_inference_f32_5D) { - const auto param = make_shared(element::f32, Shape{12, 135, 221, 31, 15}); - const auto alpha = make_shared(element::f32, Shape{1}); - const auto lambda = make_shared(element::f32, Shape{1}); - const auto selu = make_shared(param, alpha, lambda); + const auto param = make_shared(element::f32, Shape{12, 135, 221, 31, 15}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); ASSERT_EQ(selu->get_element_type(), element::f32); ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15})); } TEST(type_prop, selu_basic_inference_f16_5D) { - const auto param = make_shared(element::f16, Shape{12, 135, 221, 31, 15}); - const auto alpha = make_shared(element::f16, Shape{1}); - const auto lambda = make_shared(element::f16, Shape{1}); - const auto selu = make_shared(param, alpha, lambda); + const auto param = make_shared(element::f16, Shape{12, 135, 221, 31, 15}); + const auto alpha = make_shared(element::f16, Shape{1}); + const auto lambda = make_shared(element::f16, Shape{1}); + const auto selu = make_shared(param, alpha, lambda); ASSERT_EQ(selu->get_element_type(), element::f16); ASSERT_EQ(selu->get_shape(), (Shape{12, 135, 221, 31, 15})); @@ -52,10 +52,10 @@ TEST(type_prop, selu_basic_inference_f16_5D) { TEST(type_prop, selu_incompatible_input_type_boolean) { // Invalid data input element type try { - auto data = make_shared(element::boolean, Shape{1, 2, 3, 4}); - const auto alpha = make_shared(element::boolean, Shape{1}); - const auto lambda = make_shared(element::boolean, Shape{1}); - auto selu = make_shared(data, alpha, lambda); + auto data = make_shared(element::boolean, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::boolean, Shape{1}); + const auto lambda = make_shared(element::boolean, Shape{1}); + auto selu = make_shared(data, alpha, lambda); // Data input expected to be of numeric type FAIL() << "Invalid input type not detected"; } catch (const NodeValidationFailure& error) { @@ -68,10 +68,10 @@ TEST(type_prop, selu_incompatible_input_type_boolean) { TEST(type_prop, selu_incompatible_input_type_i32) { // Invalid data input element type try { - auto data = make_shared(element::i32, Shape{1, 2, 3, 4}); - const auto alpha = make_shared(element::i32, Shape{1}); - const auto lambda = make_shared(element::i32, Shape{1}); - auto selu = make_shared(data, alpha, lambda); + auto data = make_shared(element::i32, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::i32, 
Shape{1}); + const auto lambda = make_shared(element::i32, Shape{1}); + auto selu = make_shared(data, alpha, lambda); // Data input expected to be of numeric type FAIL() << "Invalid input type not detected"; } catch (const NodeValidationFailure& error) { @@ -84,10 +84,10 @@ TEST(type_prop, selu_incompatible_input_type_i32) { TEST(type_prop, selu_incompatible_input_type_u16) { // Invalid data input element type try { - auto data = make_shared(element::u16, Shape{1, 2, 3, 4}); - const auto alpha = make_shared(element::u16, Shape{1}); - const auto lambda = make_shared(element::u16, Shape{1}); - auto selu = make_shared(data, alpha, lambda); + auto data = make_shared(element::u16, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::u16, Shape{1}); + const auto lambda = make_shared(element::u16, Shape{1}); + auto selu = make_shared(data, alpha, lambda); // Data input expected to be of numeric type FAIL() << "Invalid input type not detected"; } catch (const NodeValidationFailure& error) { @@ -100,10 +100,10 @@ TEST(type_prop, selu_incompatible_input_type_u16) { TEST(type_prop, selu_incompatible_input_types) { // Invalid data input element type try { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto alpha = make_shared(element::f32, Shape{1}); - const auto lambda = make_shared(element::u16, Shape{1}); - auto selu = make_shared(data, alpha, lambda); + auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::u16, Shape{1}); + auto selu = make_shared(data, alpha, lambda); // Data input expected to be of numeric type FAIL() << "Inavlid input types not detected"; } catch (const NodeValidationFailure& error) { @@ -115,26 +115,26 @@ TEST(type_prop, selu_incompatible_input_types) { TEST(type_prop, selu_dynamic_rank_input_shape_2D) { const PartialShape param_shape{Dimension::dynamic(), 10}; - const auto param = std::make_shared(element::f32, param_shape); - const auto alpha = make_shared(element::f32, Shape{2, 1}); - const auto lambda = make_shared(element::f32, Shape{1}); - const auto op = std::make_shared(param, alpha, lambda); + const auto param = std::make_shared(element::f32, param_shape); + const auto alpha = make_shared(element::f32, Shape{2, 1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10})); } TEST(type_prop, selu_dynamic_rank_input_shape_3D) { const PartialShape param_shape{100, Dimension::dynamic(), 58}; - const auto param = std::make_shared(element::f32, param_shape); - const auto alpha = make_shared(element::f32, Shape{1}); - const auto lambda = make_shared(element::f32, Shape{1}); - const auto op = std::make_shared(param, alpha, lambda); + const auto param = std::make_shared(element::f32, param_shape); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58})); } TEST(type_prop, selu_dynamic_rank_input_shape_full) { - const auto param = std::make_shared(element::f32, PartialShape::dynamic()); - const auto alpha = make_shared(element::f32, Shape{1}); - const auto lambda = make_shared(element::f32, Shape{1}); - const auto op = std::make_shared(param, alpha, lambda); + const auto param = 
std::make_shared(element::f32, PartialShape::dynamic()); + const auto alpha = make_shared(element::f32, Shape{1}); + const auto lambda = make_shared(element::f32, Shape{1}); + const auto op = std::make_shared(param, alpha, lambda); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } diff --git a/src/core/tests/type_prop/shape_of.cpp b/src/core/tests/type_prop/shape_of.cpp index f8de76713359d4..ac89bb5d747c40 100644 --- a/src/core/tests/type_prop/shape_of.cpp +++ b/src/core/tests/type_prop/shape_of.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/shape_of.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/broadcast.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, shape_of_v0) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -19,7 +20,7 @@ TEST(type_prop, shape_of_v0) { } TEST(type_prop, shape_of_partial_et_dynamic_v0) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -27,7 +28,8 @@ TEST(type_prop, shape_of_partial_et_dynamic_v0) { } TEST(type_prop, shape_of_partial_rank_static_dynamic_v0) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + auto a = make_shared(element::f32, + PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -35,7 +37,7 @@ TEST(type_prop, shape_of_partial_rank_static_dynamic_v0) { } TEST(type_prop, shape_of_partial_rank_dynamic_v0) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -43,7 +45,7 @@ TEST(type_prop, shape_of_partial_rank_dynamic_v0) { } TEST(type_prop, shape_of_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -51,7 +53,7 @@ TEST(type_prop, shape_of_v3) { } TEST(type_prop, shape_of_partial_et_dynamic_v3) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -59,7 +61,8 @@ TEST(type_prop, shape_of_partial_et_dynamic_v3) { } TEST(type_prop, shape_of_partial_rank_static_dynamic_v3) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + auto a = make_shared(element::f32, + PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -67,7 +70,7 @@ TEST(type_prop, shape_of_partial_rank_static_dynamic_v3) { } TEST(type_prop, shape_of_partial_rank_dynamic_v3) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::f32, PartialShape::dynamic()); auto so = make_shared(a); ASSERT_EQ(so->get_output_element_type(0), element::i64); @@ -75,7 
+78,7 @@ TEST(type_prop, shape_of_partial_rank_dynamic_v3) { } TEST(type_prop, shape_of_output_type_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a, element::i32); try { auto sx = make_shared(a, element::i8); @@ -108,9 +111,9 @@ TEST(type_prop, shape_of_1_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); auto bc = std::make_shared(param, shape_0); ASSERT_EQ(bc->get_shape(), (Shape{3, 4})); @@ -124,8 +127,8 @@ TEST(type_prop, shape_of_3_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); auto shape_0 = std::make_shared(param_0); auto bc = std::make_shared(param, shape_0); diff --git a/src/core/tests/type_prop/shuffle_channels.cpp b/src/core/tests/type_prop/shuffle_channels.cpp index ed9a9d3ff4fe00..314fdf3f85ade4 100644 --- a/src/core/tests/type_prop/shuffle_channels.cpp +++ b/src/core/tests/type_prop/shuffle_channels.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/shuffle_channels.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, shuffle_channels_default_4D) { const auto data_input_shape = Shape{3, 9, 4, 5}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto shuffle_channels = make_shared(data); EXPECT_EQ(shuffle_channels->get_element_type(), element::f32); @@ -22,7 +22,7 @@ TEST(type_prop, shuffle_channels_default_4D) { TEST(type_prop, shuffle_channels_basic_4D) { const auto data_input_shape = Shape{3, 9, 4, 5}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 1; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -34,7 +34,7 @@ TEST(type_prop, shuffle_channels_basic_4D) { TEST(type_prop, shuffle_channels_dynamic_4D) { auto data_input_shape = PartialShape{Dimension::dynamic(), Dimension(3, 9), 4, Dimension(4, 15)}; set_shape_labels(data_input_shape, 10); - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 1; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -46,7 +46,7 @@ TEST(type_prop, shuffle_channels_dynamic_4D) { TEST(type_prop, shuffle_channels_dynamic_fully) { const auto data_input_shape = PartialShape::dynamic(); - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 1; const auto group = 
3; const auto shuffle_channels = make_shared(data, axis, group); @@ -59,7 +59,7 @@ TEST(type_prop, shuffle_channels_ND_bigger) { { // 5D const auto data_input_shape = Shape{2, 3, 9, 4, 5}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 2; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -69,7 +69,7 @@ TEST(type_prop, shuffle_channels_ND_bigger) { { // 6D const auto data_input_shape = Shape{6, 2, 3, 9, 4, 5}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 3; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -82,7 +82,7 @@ TEST(type_prop, shuffle_channels_ND_smaller) { { // 3D const auto data_input_shape = Shape{5, 4, 9}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 2; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -92,7 +92,7 @@ TEST(type_prop, shuffle_channels_ND_smaller) { { // 2D const auto data_input_shape = Shape{9, 20}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 0; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -102,7 +102,7 @@ TEST(type_prop, shuffle_channels_ND_smaller) { { // 1D const auto data_input_shape = Shape{9}; - const auto data = make_shared(element::f32, data_input_shape); + const auto data = make_shared(element::f32, data_input_shape); const auto axis = 0; const auto group = 3; const auto shuffle_channels = make_shared(data, axis, group); @@ -112,7 +112,7 @@ TEST(type_prop, shuffle_channels_ND_smaller) { } TEST(type_prop, shuffle_channels_axis_validation) { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); OV_EXPECT_THROW(const auto op = make_shared(data, -5, 5), ov::AssertFailure, @@ -120,7 +120,7 @@ TEST(type_prop, shuffle_channels_axis_validation) { } TEST(type_prop, shuffle_channels_negative_axis_calculation) { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); const auto shuffle_channels = make_shared(data, -3, 2); @@ -132,21 +132,21 @@ TEST(type_prop, shuffle_channels_infer_shape_with_negative_axis_calculation) { const auto group = 2; { const auto data_input_shape = Shape{1, 3, 5, 8}; - const auto data = make_shared(element::f64, data_input_shape); + const auto data = make_shared(element::f64, data_input_shape); const auto shuffle_channels = make_shared(data, -1, group); EXPECT_EQ(shuffle_channels->get_output_partial_shape(0), data_input_shape); } { const auto data_input_shape = Shape{1, 3, 8, 5}; - const auto data = make_shared(element::f64, data_input_shape); + const auto data = make_shared(element::f64, data_input_shape); const auto shuffle_channels = make_shared(data, -2, group); EXPECT_EQ(shuffle_channels->get_output_partial_shape(0), data_input_shape); } { const auto data_input_shape = Shape{8, 3, 5, 7}; - const auto data = make_shared(element::f64, data_input_shape); + const auto data = make_shared(element::f64, data_input_shape); const auto shuffle_channels = make_shared(data, -4, group); EXPECT_EQ(shuffle_channels->get_output_partial_shape(0), 
data_input_shape); @@ -154,7 +154,7 @@ TEST(type_prop, shuffle_channels_infer_shape_with_negative_axis_calculation) { } TEST(type_prop, shuffle_channels_invalid_input_shape) { - const auto data = make_shared(element::f64, Shape{}); + const auto data = make_shared(element::f64, Shape{}); OV_EXPECT_THROW(const auto op = make_shared(data, 0, 1), NodeValidationFailure, @@ -162,7 +162,7 @@ TEST(type_prop, shuffle_channels_invalid_input_shape) { } TEST(type_prop, shuffle_channels_invalid_groups_value) { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 15}); + const auto data = make_shared(element::f64, Shape{1, 2, 3, 15}); OV_EXPECT_THROW(const auto op = make_shared(data, -1, 2), NodeValidationFailure, @@ -171,7 +171,7 @@ TEST(type_prop, shuffle_channels_invalid_groups_value) { TEST(type_prop, shuffle_channels_default_ctor) { const auto data_shape = PartialShape{{2, 5}, {0, 2}, 3, {2, -1}}; - const auto data = make_shared(element::i32, data_shape); + const auto data = make_shared(element::i32, data_shape); const auto shuffle_channels = make_shared(); shuffle_channels->set_axis(-3); diff --git a/src/core/tests/type_prop/sigmoid.cpp b/src/core/tests/type_prop/sigmoid.cpp index 8d8eb1bfefaa7b..5f570d83377ec2 100644 --- a/src/core/tests/type_prop/sigmoid.cpp +++ b/src/core/tests/type_prop/sigmoid.cpp @@ -2,34 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/sigmoid.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, sigmoid) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto sigmoid_func = make_shared(data); + auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto sigmoid_func = make_shared(data); EXPECT_EQ(sigmoid_func->get_element_type(), element::f32); EXPECT_EQ(sigmoid_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, sigmoid_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); - auto sigmoid_func = make_shared(data); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto sigmoid_func = make_shared(data); EXPECT_EQ(sigmoid_func->get_element_type(), element::f32); ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown - auto sigmoid_partial = make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto sigmoid_partial = + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(sigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, sigmoid_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); - auto sigmoid_func = make_shared(data); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto sigmoid_func = make_shared(data); EXPECT_EQ(sigmoid_func->get_element_type(), element::f32); ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(sigmoid_func->get_output_partial_shape(0).rank().is_static()); diff --git a/src/core/tests/type_prop/sign.cpp b/src/core/tests/type_prop/sign.cpp index fdf2aaa7d0be0d..a4e2e52736d838 100644 --- a/src/core/tests/type_prop/sign.cpp +++ b/src/core/tests/type_prop/sign.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/sign.hpp" + #include "unary_ops.hpp" -using Type = 
::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_sign, UnaryOperator, Type); diff --git a/src/core/tests/type_prop/sin.cpp b/src/core/tests/type_prop/sin.cpp index 8af6d2ae750bba..c1e1892dda26d3 100644 --- a/src/core/tests/type_prop/sin.cpp +++ b/src/core/tests/type_prop/sin.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/sin.hpp" + #include "unary_ops.hpp" -using Type = ::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_sin, UnaryOperator, Type); diff --git a/src/core/tests/type_prop/sinh.cpp b/src/core/tests/type_prop/sinh.cpp index 3100846c324cd7..396bbc5db9e18e 100644 --- a/src/core/tests/type_prop/sinh.cpp +++ b/src/core/tests/type_prop/sinh.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/sinh.hpp" + #include "unary_ops.hpp" -using Type = ::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_sinh, UnaryOperator, Type); diff --git a/src/core/tests/type_prop/slice.cpp b/src/core/tests/type_prop/slice.cpp index a00c6317e6d773..b4ef1dcff27115 100644 --- a/src/core/tests/type_prop/slice.cpp +++ b/src/core/tests/type_prop/slice.cpp @@ -6,12 +6,11 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/opsets/opset9.hpp" #include "sequnce_generator.hpp" -using namespace ngraph; +using namespace ov; using namespace testing; namespace { @@ -1067,9 +1066,9 @@ TEST(type_prop, slice_v8_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); const auto& et = element::i64; std::vector start_val{0}, stop_val{1}, step_val{1}; @@ -1105,10 +1104,10 @@ TEST(type_prop, slice_v8_dynamic_dimension_but_slice_min_is_lt_input_min_size) { TEST(type_prop, slice_v8_use_default_ctor) { const auto zero_mask = std::vector(3, 0); - auto data = std::make_shared(element::f32, PartialShape{10, 11, 12, 2}); - auto start = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); - auto stop = op::Constant::create(element::i64, Shape{4}, {1, 5, 20, 20}); - auto step = op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}); + auto data = std::make_shared(element::f32, PartialShape{10, 11, 12, 2}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 5, 20, 20}); + auto step = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}); auto slice = std::make_shared(); slice->set_arguments(ov::OutputVector{data, start, stop, step}); @@ -1120,12 +1119,12 @@ TEST(type_prop, slice_v8_use_default_ctor) { TEST(type_prop, slice_v8_stop_is_shape_of_with_bounds) { auto shape = PartialShape{1, {5, 7}}; set_shape_labels(shape, 20); - const auto p_stop = std::make_shared(element::i64, shape); - const auto shape_of_stop = std::make_shared(p_stop); + const auto p_stop = std::make_shared(element::i64, shape); + const auto shape_of_stop = std::make_shared(p_stop); - auto data = 
op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto start = op::Constant::create(element::i64, Shape{2}, {0, 0}); - auto steps = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 0}); + auto steps = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto slice = std::make_shared(data, start, shape_of_stop, steps); @@ -1136,12 +1135,12 @@ TEST(type_prop, slice_v8_stop_is_shape_of_with_bounds) { TEST(type_prop, slice_v8_start_is_shape_of_with_bounds) { auto shape = PartialShape{0, {3, 5}}; set_shape_labels(shape, 20); - const auto p_start = std::make_shared(element::i64, shape); - const auto shape_of_start = std::make_shared(p_start); + const auto p_start = std::make_shared(element::i64, shape); + const auto shape_of_start = std::make_shared(p_start); - auto data = op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto stop = op::Constant::create(element::i64, Shape{2}, {1, 7}); - auto steps = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 7}); + auto steps = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto slice = std::make_shared(data, shape_of_start, stop, steps); @@ -1154,13 +1153,13 @@ TEST(type_prop, slice_v8_start_stop_is_shape_of_with_bounds) { auto stop_shape = PartialShape{2, {6, 7}}; set_shape_labels(start_shape, 10); set_shape_labels(stop_shape, 20); - const auto p_start = std::make_shared(element::i64, start_shape); - const auto p_stop = std::make_shared(element::i64, stop_shape); - const auto shape_of_start = std::make_shared(p_start); - const auto shape_of_stop = std::make_shared(p_stop); + const auto p_start = std::make_shared(element::i64, start_shape); + const auto p_stop = std::make_shared(element::i64, stop_shape); + const auto shape_of_start = std::make_shared(p_start); + const auto shape_of_stop = std::make_shared(p_stop); - auto data = op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto steps = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto steps = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto slice = std::make_shared(data, shape_of_start, shape_of_stop, steps); @@ -1169,11 +1168,11 @@ TEST(type_prop, slice_v8_start_stop_is_shape_of_with_bounds) { } TEST(type_prop, slice_v8_unknowns_axes) { - const auto data = std::make_shared(element::i64, Shape{5, 10, 15}); - const auto start = std::make_shared(element::i64, PartialShape{-1}); - const auto stop = std::make_shared(element::i64, Shape{1}); - const auto steps = std::make_shared(element::i64, Shape{1}); - const auto axes = std::make_shared(element::i64, Shape{1}); + const auto data = std::make_shared(element::i64, Shape{5, 10, 15}); + const auto start = std::make_shared(element::i64, PartialShape{-1}); + const auto stop = std::make_shared(element::i64, Shape{1}); + const auto steps = std::make_shared(element::i64, Shape{1}); + const auto axes = std::make_shared(element::i64, Shape{1}); auto slice = std::make_shared(data, start, stop, steps, axes); @@ -1181,11 +1180,11 @@ TEST(type_prop, 
slice_v8_unknowns_axes) { } TEST(type_prop, slice_v8_inf_dim_start_from_last_N_to_end) { - auto data = std::make_shared(element::f32, PartialShape{1, 256, -1}); - auto start = op::Constant::create(element::i64, Shape{1}, {-7}); - auto stop = op::Constant::create(element::i64, Shape{1}, std::vector{INT64_MAX}); - auto step = op::Constant::create(element::i64, Shape{1}, {1}); - auto axes = op::Constant::create(element::i64, Shape{1}, {2}); + auto data = std::make_shared(element::f32, PartialShape{1, 256, -1}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{1}, {-7}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{1}, std::vector{INT64_MAX}); + auto step = ov::op::v0::Constant::create(element::i64, Shape{1}, {1}); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{1}, {2}); auto slice = std::make_shared(data, start, stop, step, axes); diff --git a/src/core/tests/type_prop/softmax.cpp b/src/core/tests/type_prop/softmax.cpp index ab22ea775c2376..fd9fb328290ddd 100644 --- a/src/core/tests/type_prop/softmax.cpp +++ b/src/core/tests/type_prop/softmax.cpp @@ -2,50 +2,53 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/softmax.hpp" + +#include + +#include "openvino/op/parameter.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, softmax_default_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto sm = make_shared(arg); ASSERT_EQ(sm->get_axis(), 1); } TEST(type_prop, softmax_out_of_bound_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); // axis cannot be a negative number - ASSERT_THROW(const auto unused = make_shared(arg, -1), ngraph::NodeValidationFailure); + ASSERT_THROW(const auto unused = make_shared(arg, -1), ov::NodeValidationFailure); } TEST(type_prop, softmax_8_default_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto sm = make_shared(arg); ASSERT_EQ(sm->get_axis(), 1); } TEST(type_prop, softmax_8_out_of_bound_negative_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); // axis should be in range [-rank, rank - 1] - ASSERT_THROW(const auto unused = make_shared(arg, -10), ngraph::NodeValidationFailure); + ASSERT_THROW(const auto unused = make_shared(arg, -10), ov::NodeValidationFailure); } TEST(type_prop, softmax_8_out_of_bound_positive_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); // axis should be in range [-rank, rank - 1] - ASSERT_THROW(const auto unused = make_shared(arg, 10), ngraph::NodeValidationFailure); + ASSERT_THROW(const auto unused = make_shared(arg, 10), ov::NodeValidationFailure); } TEST(type_prop, softmax_8_positive_axis) { const Shape arg_shape{1, 10}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::f32, arg_shape); auto softmax = make_shared(arg, 1); ASSERT_EQ(softmax->get_element_type(), element::f32); ASSERT_EQ(softmax->get_shape(), (Shape{1, 10})); @@ -53,7 +56,7 @@ TEST(type_prop, softmax_8_positive_axis) { TEST(type_prop, softmax_8_negative_axis) { const Shape arg_shape{1, 10}; - auto arg = make_shared(element::f32, arg_shape); + auto 
arg = make_shared(element::f32, arg_shape); auto softmax = make_shared(arg, -1); ASSERT_EQ(softmax->get_element_type(), element::f32); ASSERT_EQ(softmax->get_shape(), (Shape{1, 10})); diff --git a/src/core/tests/type_prop/softplus.cpp b/src/core/tests/type_prop/softplus.cpp index df1939a429b6d3..eee24728ae836a 100644 --- a/src/core/tests/type_prop/softplus.cpp +++ b/src/core/tests/type_prop/softplus.cpp @@ -2,34 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/softplus.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, softplus) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto softplus_func = make_shared(data); EXPECT_EQ(softplus_func->get_element_type(), element::f32); EXPECT_EQ(softplus_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, softplus_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); EXPECT_EQ(softplus_func->get_element_type(), element::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto softplus_partial = - make_shared(make_shared(element::f32, PartialShape::dynamic())); + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(softplus_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, softplus_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); EXPECT_EQ(softplus_func->get_element_type(), element::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme((PartialShape{1, Dimension::dynamic(), 6}))); @@ -37,7 +39,7 @@ TEST(type_prop, softplus_partial_static_rank) { } TEST(type_prop, softplus_invalid_element_type) { - auto data = make_shared(element::i32, Shape{2, 2}); + auto data = make_shared(element::i32, Shape{2, 2}); try { auto softplus = make_shared(data); diff --git a/src/core/tests/type_prop/space_to_batch.cpp b/src/core/tests/type_prop/space_to_batch.cpp index b5e41e3c140720..9ec5faac1aa914 100644 --- a/src/core/tests/type_prop/space_to_batch.cpp +++ b/src/core/tests/type_prop/space_to_batch.cpp @@ -2,21 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/space_to_batch.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/batch_to_space.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) TEST(type_prop, space_to_batch_output_shape_2D) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); + auto pads_begin = 
make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -25,10 +30,10 @@ TEST(type_prop, space_to_batch_output_shape_2D) { } TEST(type_prop, space_to_batch_output_shape_4D) { - auto data = make_shared(element::f32, Shape{2, 64, 64, 3}); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto data = make_shared(element::f32, Shape{2, 64, 64, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -37,10 +42,10 @@ TEST(type_prop, space_to_batch_output_shape_4D) { } TEST(type_prop, space_to_batch_output_shape_5D) { - auto data = make_shared(element::f32, Shape{2, 32, 64, 128, 256}); - auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); - auto pads_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); - auto pads_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + auto data = make_shared(element::f32, Shape{2, 32, 64, 128, 256}); + auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + auto pads_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + auto pads_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -49,10 +54,10 @@ TEST(type_prop, space_to_batch_output_shape_5D) { } TEST(type_prop, space_to_batch_and_batch_to_space) { - auto data = make_shared(element::f32, PartialShape{2, {100, -1}, 1024, 3}); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + auto data = make_shared(element::f32, PartialShape{2, {100, -1}, 1024, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -68,10 +73,10 @@ TEST(type_prop, space_to_batch_and_batch_to_space) { TEST(type_prop, space_to_batch_when_space_is_static) { auto data_shape = PartialShape{{2, 5}, 100, 1024, 3}; set_shape_labels(data_shape, 10); - auto data = make_shared(element::f32, data_shape); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + auto data = make_shared(element::f32, data_shape); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -84,10 +89,10 @@ TEST(type_prop, 
space_to_batch_when_space_is_static) { TEST(type_prop, space_to_batch_when_data_dynamic_) { auto data_shape = PartialShape{{2, 5}, {5, 100}, {100, 1024}, {3, 10}}; set_shape_labels(data_shape, 10); - auto data = make_shared(element::f32, data_shape); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 1, 1, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{1, 0, 2, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{1, 0, 3, 0}); + auto data = make_shared(element::f32, data_shape); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 1, 1, 1}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{1, 0, 2, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{1, 0, 3, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -99,10 +104,10 @@ TEST(type_prop, space_to_batch_when_data_dynamic_) { TEST(type_prop, space_to_batch_when_space_is_dynamic) { auto data_shape = PartialShape{{2, 5}, {5, 100}, {100, 1024}, {3, 10}}; set_shape_labels(data_shape, 10); - auto data = make_shared(element::f32, data_shape); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + auto data = make_shared(element::f32, data_shape); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -115,10 +120,10 @@ TEST(type_prop, space_to_batch_when_space_is_dynamic) { } TEST(type_prop, space_to_batch_dynamic_shape_static_rank) { - auto data = make_shared(element::f32, PartialShape::dynamic(4)); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 2, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto data = make_shared(element::f32, PartialShape::dynamic(4)); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 2, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -127,10 +132,10 @@ TEST(type_prop, space_to_batch_dynamic_shape_static_rank) { } TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank) { - auto data = make_shared(element::f32, PartialShape::dynamic()); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto data = make_shared(element::f32, PartialShape::dynamic()); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -139,10 +144,10 @@ TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank) { } TEST(type_prop, space_to_batch_dynamic_rank_shape_block_and_pads_not_const) { - auto data = 
make_shared(element::f32, PartialShape::dynamic()); - auto block_shape = make_shared(element::i64, Shape{4}); - auto pads_begin = make_shared(element::i64, Shape{4}); - auto pads_end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::f32, PartialShape::dynamic()); + auto block_shape = make_shared(element::i64, Shape{4}); + auto pads_begin = make_shared(element::i64, Shape{4}); + auto pads_end = make_shared(element::i64, Shape{4}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -151,10 +156,10 @@ TEST(type_prop, space_to_batch_dynamic_rank_shape_block_and_pads_not_const) { } TEST(type_prop, space_to_batch_default_ctor) { - auto data = make_shared(element::f32, PartialShape{{2, 5}, 100, {100, -1}, 3}); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 4, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{1, 1, 2, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{1, 1, 6, 0}); + auto data = make_shared(element::f32, PartialShape{{2, 5}, 100, {100, -1}, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 4, 1}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{1, 1, 2, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{1, 1, 6, 0}); auto space_to_batch = make_shared(); space_to_batch->set_arguments(OutputVector{data, block_shape, pads_begin, pads_end}); @@ -168,11 +173,11 @@ TEST(type_prop, space_to_batch_default_ctor) { } TEST(type_prop, space_to_batch_non_const_inputs) { - auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); - auto block_shape = make_shared(element::i64, PartialShape{4}); - auto pads_begin = make_shared(element::i64, PartialShape{4}); - auto pads_end = make_shared(element::i64, PartialShape{4}); + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto pads_begin = make_shared(element::i64, PartialShape{4}); + auto pads_end = make_shared(element::i64, PartialShape{4}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); EXPECT_EQ(space_to_batch->get_element_type(), element::f32); @@ -180,10 +185,10 @@ TEST(type_prop, space_to_batch_non_const_inputs) { } TEST(type_prop, space_to_batch_block_non_constant_only) { - auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); - auto block_shape = make_shared(element::i64, PartialShape{4}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); EXPECT_EQ(space_to_batch->get_element_type(), element::f32); @@ -191,11 +196,11 @@ TEST(type_prop, space_to_batch_block_non_constant_only) { } TEST(type_prop, space_to_batch_crops_non_constant_only) { - auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); - auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1}); - auto pads_begin = make_shared(element::i64, PartialShape{4}); - auto pads_end = make_shared(element::i64, PartialShape{4}); + auto 
block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1}); + auto pads_begin = make_shared(element::i64, PartialShape{4}); + auto pads_end = make_shared(element::i64, PartialShape{4}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); EXPECT_EQ(space_to_batch->get_element_type(), element::f32); @@ -203,10 +208,10 @@ TEST(type_prop, space_to_batch_crops_non_constant_only) { } TEST(type_prop, space_to_batch_invalid_element_type_block_shape) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::f32, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::f32, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); try { auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -220,10 +225,10 @@ TEST(type_prop, space_to_batch_invalid_element_type_block_shape) { } TEST(type_prop, space_to_batch_invalid_element_type_pads_begin) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::f32, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::f32, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); try { auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -237,10 +242,10 @@ TEST(type_prop, space_to_batch_invalid_element_type_pads_begin) { } TEST(type_prop, space_to_batch_invalid_element_type_pads_end) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i16, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::f32, Shape{2}, vector{0, 0}); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i16, Shape{2}, vector{1, 5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::f32, Shape{2}, vector{0, 0}); try { auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); @@ -254,10 +259,10 @@ TEST(type_prop, space_to_batch_invalid_element_type_pads_end) { } TEST(type_prop, space_to_batch_invalid_value_block_shape) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{-1, -5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::f32, Shape{2, 128}); + auto block_shape = make_shared(element::i64, Shape{2}, vector{-1, -5}); + auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); + auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); try { auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); diff --git a/src/core/tests/type_prop/space_to_depth.cpp b/src/core/tests/type_prop/space_to_depth.cpp index 
033f12adf72d0b..a4a21069d3b9aa 100644 --- a/src/core/tests/type_prop/space_to_depth.cpp +++ b/src/core/tests/type_prop/space_to_depth.cpp @@ -2,47 +2,49 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/space_to_depth.hpp" + +#include + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) TEST(type_prop, space_to_depth_output_shape_block_first_4D) { - auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - auto space_to_depth = make_shared(A, mode, 8); + auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + auto space_to_depth = make_shared(A, mode, 8); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 128, 8, 8})); } TEST(type_prop, space_to_depth_output_shape_block_first_4D_2) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - auto space_to_depth = make_shared(A, mode, 2); + auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + auto space_to_depth = make_shared(A, mode, 2); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_4D) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; - auto space_to_depth = make_shared(A, mode, 2); + auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_5D) { - auto A = make_shared(element::f32, Shape{1, 12, 4, 1080, 1616}); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; - auto space_to_depth = make_shared(A, mode, 2); + auto A = make_shared(element::f32, Shape{1, 12, 4, 1080, 1616}); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2})); @@ -51,9 +53,9 @@ TEST(type_prop, space_to_depth_output_shape_depth_first_5D) { TEST(type_prop, space_to_depth_output_shape_depth_first_5D_1) { auto a_shape = PartialShape{{1, 4}, {12, 36}, 1080, 1616}; set_shape_labels(a_shape, 10); - auto A = make_shared(element::f32, a_shape); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; - auto space_to_depth = make_shared(A, mode, 1); + auto A = make_shared(element::f32, a_shape); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 1); EXPECT_EQ(space_to_depth->get_element_type(), element::f32); EXPECT_EQ(space_to_depth->get_output_partial_shape(0), a_shape); @@ -63,9 +65,9 @@ 
TEST(type_prop, space_to_depth_output_shape_depth_first_5D_1) { TEST(type_prop, space_to_depth_output_shape_when_space_is_static) { auto a_shape = PartialShape{{1, 4}, {12, 36}, 1080, 1616}; set_shape_labels(a_shape, 10); - auto A = make_shared(element::f32, a_shape); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; - auto space_to_depth = make_shared(A, mode, 2); + auto A = make_shared(element::f32, a_shape); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); EXPECT_EQ(space_to_depth->get_element_type(), element::f32); EXPECT_EQ(space_to_depth->get_output_partial_shape(0), @@ -77,9 +79,9 @@ TEST(type_prop, space_to_depth_output_shape_when_space_is_static) { TEST(type_prop, space_to_depth_output_shape_when_space_is_dynamic) { auto a_shape = PartialShape{{1, 4}, {12, 36}, {100, 1081}, {99, 1616}}; set_shape_labels(a_shape, 10); - auto A = make_shared(element::f32, a_shape); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; - auto space_to_depth = make_shared(A, mode, 2); + auto A = make_shared(element::f32, a_shape); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); EXPECT_EQ(space_to_depth->get_element_type(), element::f32); EXPECT_EQ( @@ -90,34 +92,34 @@ TEST(type_prop, space_to_depth_output_shape_when_space_is_dynamic) { } TEST(type_prop, space_to_depth_dynamic_shape_static_rank) { - auto A = make_shared(element::f32, PartialShape::dynamic(4)); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - auto space_to_depth = make_shared(A, mode, 8); + auto A = make_shared(element::f32, PartialShape::dynamic(4)); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + auto space_to_depth = make_shared(A, mode, 8); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_output_partial_shape(0), PartialShape::dynamic(4)); } TEST(type_prop, space_to_depth_dynamic_shape_dynamic_rank) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - auto space_to_depth = make_shared(A, mode, 8); + auto A = make_shared(element::f32, PartialShape::dynamic()); + const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + auto space_to_depth = make_shared(A, mode, 8); ASSERT_EQ(space_to_depth->get_element_type(), element::f32); ASSERT_EQ(space_to_depth->get_output_partial_shape(0), PartialShape::dynamic()); } TEST(type_prop, space_to_depth_default_ctor) { - auto A = make_shared(element::f64, PartialShape{{1, 4}, {12, 36}, 900, 3}); + auto A = make_shared(element::f64, PartialShape{{1, 4}, {12, 36}, 900, 3}); - const auto space_to_depth = make_shared(); + const auto space_to_depth = make_shared(); space_to_depth->set_block_size(3); - space_to_depth->set_mode(op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST); + space_to_depth->set_mode(op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST); space_to_depth->set_argument(0, A); space_to_depth->validate_and_infer_types(); EXPECT_EQ(space_to_depth->get_block_size(), 3); - EXPECT_EQ(space_to_depth->get_mode(), op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST); + EXPECT_EQ(space_to_depth->get_mode(), op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST); EXPECT_EQ(space_to_depth->get_input_size(), 1); EXPECT_EQ(space_to_depth->get_output_size(), 1); 
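// A minimal standalone sketch (not part of the patch) of the migrated SpaceToDepth
// construction pattern exercised by the tests above. The versioned type names
// (ov::op::v0::Parameter, ov::op::v0::SpaceToDepth) are assumed from the
// ov::op::v0::SpaceToDepth::SpaceToDepthMode usage visible elsewhere in this patch.
#include <gtest/gtest.h>

#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/space_to_depth.hpp"

TEST(type_prop_example, space_to_depth_blocks_first_sketch) {
    // BLOCKS_FIRST with block_size = 8 folds each 8x8 spatial block into the channel
    // dimension, so {1, 2, 64, 64} becomes {1, 2 * 8 * 8, 64 / 8, 64 / 8} = {1, 128, 8, 8}.
    const auto A = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2, 64, 64});
    const auto mode = ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
    const auto space_to_depth = std::make_shared<ov::op::v0::SpaceToDepth>(A, mode, 8);

    EXPECT_EQ(space_to_depth->get_element_type(), ov::element::f32);
    EXPECT_EQ(space_to_depth->get_shape(), (ov::Shape{1, 128, 8, 8}));
}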
EXPECT_EQ(space_to_depth->get_element_type(), element::f64); @@ -125,11 +127,12 @@ TEST(type_prop, space_to_depth_default_ctor) { } TEST(type_prop, space_to_depth_input_rank_not_supported) { - auto A = make_shared(element::f32, Shape{1, 8}); + auto A = make_shared(element::f32, Shape{1, 8}); try { - auto space_to_depth = make_shared(A, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2); + auto space_to_depth = + make_shared(A, op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2); FAIL() << "Not supported input shape for SpaceToDepth exception not thrown"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), "The input tensor with rank lower than 3 is not supported (input rank: 2)"); } catch (...) { FAIL() << "SpaceToDepth decomposition failed for unexpected reason"; @@ -137,11 +140,12 @@ TEST(type_prop, space_to_depth_input_rank_not_supported) { } TEST(type_prop, space_to_depth_blocksize_not_matched) { - auto A = make_shared(element::f32, Shape{1, 3, 8, 7}); + auto A = make_shared(element::f32, Shape{1, 3, 8, 7}); try { - auto space_to_depth = make_shared(A, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 4); + auto space_to_depth = + make_shared(A, op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 4); FAIL() << "Not matched blocksize SpaceToDepth exception not thrown"; - } catch (const ngraph_error& error) { + } catch (const ov::Exception& error) { EXPECT_HAS_SUBSTRING(error.what(), "Dimension value: [ 7, 7] must be a multiple of divisor: 4"); } catch (...) { FAIL() << "SpaceToDepth decomposition failed for unexpected reason"; diff --git a/src/core/tests/type_prop/split.cpp b/src/core/tests/type_prop/split.cpp index 842e0c79cce6ef..e449b3242cd2f4 100644 --- a/src/core/tests/type_prop/split.cpp +++ b/src/core/tests/type_prop/split.cpp @@ -2,20 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/split.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/util/attr_types.hpp" #include "sequnce_generator.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, split_v1_axis_const_positive) { - const auto data = make_shared(element::f16, Shape{2, 3, 4}); - const auto axis = op::Constant::create(element::i64, {}, {1}); + const auto data = make_shared(element::f16, Shape{2, 3, 4}); + const auto axis = ov::op::v0::Constant::create(element::i64, {}, {1}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -27,8 +31,8 @@ TEST(type_prop, split_v1_axis_const_positive) { } TEST(type_prop, split_v1_axis_const_negative) { - const auto data = make_shared(element::i32, Shape{2, 6}); - const auto axis = op::Constant::create(element::i64, {}, {-2}); + const auto data = make_shared(element::i32, Shape{2, 6}); + const auto axis = ov::op::v0::Constant::create(element::i64, {}, {-2}); constexpr size_t num_splits = 2; const auto split = make_shared(data, axis, num_splits); @@ -40,8 +44,8 @@ TEST(type_prop, split_v1_axis_const_negative) { } TEST(type_prop, split_v1_axis_const_data_axis_dim_known) { - const auto data = make_shared(element::f32, PartialShape{2, 12, Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i32, {}, {1}); + const 
auto data = make_shared(element::f32, PartialShape{2, 12, Dimension::dynamic()}); + const auto axis = ov::op::v0::Constant::create(element::i32, {}, {1}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -52,8 +56,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_known) { TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) { const auto data = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i16, {}, {0}); + make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + const auto axis = ov::op::v0::Constant::create(element::i16, {}, {0}); constexpr size_t num_splits = 2; const auto split = make_shared(data, axis, num_splits); @@ -65,8 +69,8 @@ TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) { } TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) { - const auto data = make_shared(element::f32, PartialShape{4, Dimension::dynamic(), 3, 5}); - const auto axis = op::Constant::create(element::i8, {}, {1}); + const auto data = make_shared(element::f32, PartialShape{4, Dimension::dynamic(), 3, 5}); + const auto axis = ov::op::v0::Constant::create(element::i8, {}, {1}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -77,8 +81,9 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) { } TEST(type_prop, split_v1_axis_const_data_axis_dim_interval_known_divisible) { - const auto data = make_shared(element::f32, PartialShape{4, Dimension(3, 6), Dimension(3, 6), 5}); - const auto axis = op::Constant::create(element::i8, {}, {1}); + const auto data = + make_shared(element::f32, PartialShape{4, Dimension(3, 6), Dimension(3, 6), 5}); + const auto axis = ov::op::v0::Constant::create(element::i8, {}, {1}); constexpr size_t num_splits = 2; const auto split = make_shared(data, axis, num_splits); @@ -90,8 +95,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_interval_known_divisible) { } TEST(type_prop, split_v1_axis_const_data_axis_dim_interval_known_upper_bound_divisible) { - const auto data = make_shared(element::f32, PartialShape{4, Dimension(2, 4), 3, 5}); - const auto axis = op::Constant::create(element::i8, {}, {1}); + const auto data = make_shared(element::f32, PartialShape{4, Dimension(2, 4), 3, 5}); + const auto axis = ov::op::v0::Constant::create(element::i8, {}, {1}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -101,8 +106,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_interval_known_upper_bound_div } TEST(type_prop, split_v1_axis_const_invalid_data_axis_dim_interval_known) { - const auto data = make_shared(element::f32, PartialShape{4, Dimension(1, 2), 3, 5}); - const auto axis = op::Constant::create(element::i8, {}, {1}); + const auto data = make_shared(element::f32, PartialShape{4, Dimension(1, 2), 3, 5}); + const auto axis = ov::op::v0::Constant::create(element::i8, {}, {1}); constexpr size_t num_splits = 3; OV_EXPECT_THROW(const auto split = make_shared(data, axis, num_splits), @@ -112,8 +117,8 @@ TEST(type_prop, split_v1_axis_const_invalid_data_axis_dim_interval_known) { } TEST(type_prop, split_v1_axis_const_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = op::Constant::create(element::u64, {}, {1}); + const auto data = make_shared(element::f32, PartialShape::dynamic(4)); + const auto axis = 
ov::op::v0::Constant::create(element::u64, {}, {1}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -123,8 +128,8 @@ TEST(type_prop, split_v1_axis_const_only_data_rank_known) { } TEST(type_prop, split_v1_axis_param_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = make_shared(element::u32, PartialShape{}); + const auto data = make_shared(element::f32, PartialShape::dynamic(4)); + const auto axis = make_shared(element::u32, PartialShape{}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -134,8 +139,8 @@ TEST(type_prop, split_v1_axis_param_only_data_rank_known) { } TEST(type_prop, split_v1_axis_const_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = op::Constant::create(element::u16, {}, {2}); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto axis = ov::op::v0::Constant::create(element::u16, {}, {2}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -145,8 +150,8 @@ TEST(type_prop, split_v1_axis_const_data_rank_unknown) { } TEST(type_prop, split_v1_axis_param_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape{}); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::u8, PartialShape{}); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -156,8 +161,8 @@ TEST(type_prop, split_v1_axis_param_data_rank_unknown) { } TEST(type_prop, split_v1_axis_param_dynamic_ranks) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape::dynamic()); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::u8, PartialShape::dynamic()); constexpr size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -167,8 +172,8 @@ TEST(type_prop, split_v1_axis_param_dynamic_ranks) { } TEST(type_prop, split_v1_invalid_axis_et_f32) { - auto data = make_shared(element::f32, Shape{2, 6}); - auto axis = op::Constant::create(element::f32, Shape{}, {1}); + auto data = make_shared(element::f32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::f32, Shape{}, {1}); OV_EXPECT_THROW(const auto split = make_shared(data, axis, 2), NodeValidationFailure, @@ -176,8 +181,8 @@ TEST(type_prop, split_v1_invalid_axis_et_f32) { } TEST(type_prop, split_v1_invalid_axis_et_boolean) { - auto data = make_shared(element::f32, Shape{2, 6}); - auto axis = op::Constant::create(element::boolean, Shape{}, {1}); + auto data = make_shared(element::f32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::boolean, Shape{}, {1}); OV_EXPECT_THROW(const auto split = make_shared(data, axis, 2), NodeValidationFailure, @@ -185,8 +190,8 @@ TEST(type_prop, split_v1_invalid_axis_et_boolean) { } TEST(type_prop, split_v1_invalid_axis_not_a_scalar) { - auto data = make_shared(element::i32, Shape{2, 6}); - auto axis = op::Constant::create(element::i64, Shape{2}, {0, 1}); + auto data = make_shared(element::i32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 1}); OV_EXPECT_THROW(const auto split = make_shared(data, axis, 1), NodeValidationFailure, @@ -194,8 +199,8 @@ 
TEST(type_prop, split_v1_invalid_axis_not_a_scalar) { } TEST(type_prop, split_v1_invalid_num_splits) { - auto data = make_shared(element::i32, Shape{2, 6}); - auto axis = op::Constant::create(element::i64, Shape{}, {1}); + auto data = make_shared(element::i32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); constexpr size_t num_splits = 0; OV_EXPECT_THROW(const auto split = make_shared(data, axis, num_splits), @@ -204,8 +209,8 @@ TEST(type_prop, split_v1_invalid_num_splits) { } TEST(type_prop, split_v1_invalid_axis_value) { - auto data = make_shared(element::i32, Shape{2, 6}); - auto axis = op::Constant::create(element::i64, Shape{}, {-5}); + auto data = make_shared(element::i32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::i64, Shape{}, {-5}); constexpr size_t num_splits = 4; // axis value not in the range [-2, 1] @@ -215,8 +220,8 @@ TEST(type_prop, split_v1_invalid_axis_value) { } TEST(type_prop, split_v1_incompatible_data_shape_with_num_splits) { - auto data = make_shared(element::i32, Shape{2, 6}); - auto axis = op::Constant::create(element::i64, Shape{}, {1}); + auto data = make_shared(element::i32, Shape{2, 6}); + auto axis = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); constexpr size_t num_splits = 4; OV_EXPECT_THROW(const auto split = make_shared(data, axis, num_splits), @@ -276,7 +281,7 @@ INSTANTIATE_TEST_SUITE_P(type_prop_dynamic_shape, TEST_P(SplitTest, use_default_ctor) { constexpr auto dtype = element::f32; const auto param = make_shared(dtype, p_shape); - const auto axis_node = make_shared(element::i32, Shape{}, axis); + const auto axis_node = make_shared(element::i32, Shape{}, axis); const auto split = make_shared(); split->set_arguments(NodeVector{param, axis_node}); @@ -296,7 +301,7 @@ TEST_P(SplitTest, labels_propagation) { set_shape_labels(p_shape, in_labels); const auto param = make_shared(element::f32, p_shape); - const auto axis_node = make_shared(element::i32, Shape{}, axis); + const auto axis_node = make_shared(element::i32, Shape{}, axis); const auto split = make_shared(param, axis_node, num_splits); const auto outputs = split->outputs(); @@ -357,8 +362,8 @@ TEST_P(SplitBoundTest, propagate_label_and_dynamic_value) { set_shape_labels(p_shape, in_exp_labels.first); constexpr auto et = element::i64; - const auto labeled_param = std::make_shared(et, p_shape); - const auto labeled_shape_of = std::make_shared(labeled_param); + const auto labeled_param = std::make_shared(et, p_shape); + const auto labeled_shape_of = std::make_shared(labeled_param); const auto zero = std::vector{0}; const auto axis = std::make_shared(et, Shape{}, zero); diff --git a/src/core/tests/type_prop/sqrt.cpp b/src/core/tests/type_prop/sqrt.cpp index bd076b2054ad61..e9df961d3b9653 100644 --- a/src/core/tests/type_prop/sqrt.cpp +++ b/src/core/tests/type_prop/sqrt.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/sqrt.hpp" + #include "unary_ops.hpp" -using Type = ::testing::Types; +using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_sqrt, UnaryOperator, Type); diff --git a/src/core/tests/type_prop/squeeze.cpp b/src/core/tests/type_prop/squeeze.cpp index 2fea4dbd3f6b3c..3047fb9acd03fa 100644 --- a/src/core/tests/type_prop/squeeze.cpp +++ b/src/core/tests/type_prop/squeeze.cpp @@ -2,56 +2,61 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/squeeze.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include 
"gmock/gmock.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/unsqueeze.hpp" #include "sequnce_generator.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, squeeze_axes_invalid_value) { - auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes_node = make_shared(element::u64, Shape{2}, vector{0, 2}); + auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto axes_node = make_shared(element::u64, Shape{2}, vector{0, 2}); - OV_EXPECT_THROW(auto s = make_shared(param, axes_node), + OV_EXPECT_THROW(auto s = make_shared(param, axes_node), NodeValidationFailure, HasSubstr("provided axis value is invalid. Only axes of size 1 may be removed.")); } TEST(type_prop, squeeze_axes_invalid_rank) { - auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes_node = make_shared(element::i32, Shape{2, 1}, vector{0, 2}); + auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto axes_node = make_shared(element::i32, Shape{2, 1}, vector{0, 2}); - OV_EXPECT_THROW(auto s = make_shared(param, axes_node), + OV_EXPECT_THROW(auto s = make_shared(param, axes_node), NodeValidationFailure, HasSubstr("Second input (axes) should not be of rank higher than 1.")); } TEST(type_prop, squeeze_incorrect_negative_axes) { - auto param = make_shared(element::f32, Shape{1, 4, 1, 4, 1, 8}); - auto axes_node = make_shared(element::i64, Shape{2}, vector{-6, -10}); + auto param = make_shared(element::f32, Shape{1, 4, 1, 4, 1, 8}); + auto axes_node = make_shared(element::i64, Shape{2}, vector{-6, -10}); - OV_EXPECT_THROW(auto s = make_shared(param, axes_node), + OV_EXPECT_THROW(auto s = make_shared(param, axes_node), ov::Exception, HasSubstr("Parameter axis -10 out of the tensor rank range")); } TEST(type_prop, squeeze_data_static_param_axes_1D_single_elem_static_shape_no_squeezable_dims) { auto param = std::make_shared(ov::element::f32, PartialShape{2, 2, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); - OV_EXPECT_THROW(auto s = make_shared(param, axes_node), + OV_EXPECT_THROW(auto s = make_shared(param, axes_node), NodeValidationFailure, HasSubstr("doesn't contain squeezable dimension")); } TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezable_dims_two) { auto param = std::make_shared(ov::element::f32, PartialShape{1, 2, 1, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -60,7 +65,7 @@ TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezab TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezable_dims_one) { auto param = std::make_shared(ov::element::f32, PartialShape{2, 1, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -69,7 +74,7 @@ TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezab 
TEST(type_prop, squeeze_data_static_param_axes_1D_single_elem_static_shape_squeezable_dims_one) { auto param = std::make_shared(ov::element::f32, PartialShape{2, 1, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -78,7 +83,7 @@ TEST(type_prop, squeeze_data_static_param_axes_1D_single_elem_static_shape_squee TEST(type_prop, squeeze_data_static_param_axes_scalar_static_shape_squeezable_dims_one) { auto param = std::make_shared(ov::element::f32, PartialShape{2, 1, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{}); + const auto axes_node = std::make_shared(element::u64, PartialShape{}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -87,7 +92,7 @@ TEST(type_prop, squeeze_data_static_param_axes_scalar_static_shape_squeezable_di TEST(type_prop, squeeze_data_scalar_param_axes_1D_single_elem_static_shape) { auto param = std::make_shared(ov::element::f32, PartialShape{}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -96,7 +101,7 @@ TEST(type_prop, squeeze_data_scalar_param_axes_1D_single_elem_static_shape) { TEST(type_prop, squeeze_data_dynamic_param_axes_1D_two_elem_static_shape_squeezable_dims_equal) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -105,7 +110,7 @@ TEST(type_prop, squeeze_data_dynamic_param_axes_1D_two_elem_static_shape_squeeza TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{1, 2, 1, 3, 1}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -114,7 +119,7 @@ TEST(type_prop, squeeze_data_static_param_axes_1D_two_elem_static_shape_squeezab TEST(type_prop, squeeze_data_static_param_axes_1D_single_elem_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{1, 2, 1, 3, 1}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -123,7 +128,7 @@ TEST(type_prop, squeeze_data_static_param_axes_1D_single_elem_static_shape_squee TEST(type_prop, squeeze_data_static_param_axes_scalar_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{1, 2, 1, 3, 1}); - const auto axes_node = std::make_shared(element::u64, PartialShape{}); + const auto axes_node = std::make_shared(element::u64, PartialShape{}); const auto squeeze = std::make_shared(param, axes_node); 
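// A minimal standalone sketch (not part of the patch) of the Squeeze construction
// these tests exercise, using the versioned ov::op::v0 types the patch migrates to
// (assumed from the ov::op::v0::Constant calls visible elsewhere in this patch).
#include <gtest/gtest.h>

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/squeeze.hpp"

TEST(type_prop_example, squeeze_const_axes_sketch) {
    // Removing the size-1 dimensions at axes 0 and 2 turns {1, 4, 1, 8} into {4, 8}.
    const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 1, 8});
    const auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
    const auto squeeze = std::make_shared<ov::op::v0::Squeeze>(param, axes);

    EXPECT_EQ(squeeze->get_element_type(), ov::element::f32);
    EXPECT_EQ(squeeze->get_shape(), (ov::Shape{4, 8}));
}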
EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -132,7 +137,7 @@ TEST(type_prop, squeeze_data_static_param_axes_scalar_static_shape_squeezable_di TEST(type_prop, squeeze_data_dynamic_param_axes_1D_two_elem_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -141,7 +146,7 @@ TEST(type_prop, squeeze_data_dynamic_param_axes_1D_two_elem_static_shape_squeeza TEST(type_prop, squeeze_data_dynamic_param_axes_1D_single_elem_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -150,7 +155,7 @@ TEST(type_prop, squeeze_data_dynamic_param_axes_1D_single_elem_static_shape_sque TEST(type_prop, squeeze_data_dynamic_param_axes_scalar_static_shape_squeezable_dims_more) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{}); + const auto axes_node = std::make_shared(element::u64, PartialShape{}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -159,7 +164,7 @@ TEST(type_prop, squeeze_data_dynamic_param_axes_scalar_static_shape_squeezable_d TEST(type_prop, squeeze_data_dyamic_param_axes_1D_two_elem_static_shape_squeezable_dims_one) { auto param = std::make_shared(ov::element::f32, PartialShape{2, -1, 4}); - const auto axes_node = std::make_shared(element::u64, PartialShape{2}); + const auto axes_node = std::make_shared(element::u64, PartialShape{2}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -168,7 +173,7 @@ TEST(type_prop, squeeze_data_dyamic_param_axes_1D_two_elem_static_shape_squeezab TEST(type_prop, squeeze_data_dynamic_param_axes_1D_three_elem_static_shape_squeezable_dims_two) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{3}); + const auto axes_node = std::make_shared(element::u64, PartialShape{3}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -177,7 +182,7 @@ TEST(type_prop, squeeze_data_dynamic_param_axes_1D_three_elem_static_shape_squee TEST(type_prop, squeeze_data_dynamic_param_axes_1D_single_elem_static_shape_squeezable_dims_less) { auto param = std::make_shared(ov::element::f32, PartialShape{-1, {2, 8}, {1, 3}, {4, -1}}); - const auto axes_node = std::make_shared(element::u64, PartialShape{1}); + const auto axes_node = std::make_shared(element::u64, PartialShape{1}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -276,7 +281,7 @@ INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape_default_axes, PrintToStringParamName()); TEST_P(SqueezeTest, partial_shape_dimension_propagation_const_axis_i32) { - const auto axes_node = 
std::make_shared(element::i32, Shape{axes.size()}, axes); + const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -284,7 +289,7 @@ TEST_P(SqueezeTest, partial_shape_dimension_propagation_const_axis_i32) { } TEST_P(SqueezeTest, partial_shape_dimension_propagation_parameter_axes_no_data) { - const auto axes_node = std::make_shared(element::u64, PartialShape{Shape{axes.size()}}); + const auto axes_node = std::make_shared(element::u64, PartialShape{Shape{axes.size()}}); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -292,7 +297,7 @@ TEST_P(SqueezeTest, partial_shape_dimension_propagation_parameter_axes_no_data) } TEST_P(SqueezeTest, partial_shape_dimension_propagation_dynamic_axes) { - const auto axes_node = std::make_shared(element::u64, PartialShape::dynamic()); + const auto axes_node = std::make_shared(element::u64, PartialShape::dynamic()); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -307,9 +312,9 @@ TEST_P(SqueezeTest, labels_propagation) { std::tie(in_labels, exp_labels) = make_in_exp_labels(); set_shape_labels(p_shape, in_labels); - param = make_shared(element::f32, p_shape); + param = make_shared(element::f32, p_shape); - const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes); + const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(get_shape_labels(squeeze->get_output_partial_shape(0)), exp_labels); @@ -323,8 +328,8 @@ INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape_no_axes, PrintToStringParamName()); TEST_P(SqueezeShapeTests, shape_dimension_propagation_const_axis_i64) { - param = std::make_shared(element::f64, p_shape.to_shape()); - const auto axes_node = std::make_shared(element::i64, Shape{axes.size()}, axes); + param = std::make_shared(element::f64, p_shape.to_shape()); + const auto axes_node = std::make_shared(element::i64, Shape{axes.size()}, axes); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f64); @@ -356,7 +361,7 @@ INSTANTIATE_TEST_SUITE_P( PrintToStringParamName()); TEST_P(SqueezeScalarAxisTest, axis_value_as_vector) { - const auto axes_node = std::make_shared(element::i32, Shape{}, axes); + const auto axes_node = std::make_shared(element::i32, Shape{}, axes); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -364,7 +369,7 @@ TEST_P(SqueezeScalarAxisTest, axis_value_as_vector) { } TEST_P(SqueezeScalarAxisTest, axis_value_as_integer) { - const auto axes_node = std::make_shared(element::i32, Shape{}, axes.front()); + const auto axes_node = std::make_shared(element::i32, Shape{}, axes.front()); const auto squeeze = std::make_shared(param, axes_node); EXPECT_EQ(squeeze->get_element_type(), element::f32); @@ -398,8 +403,8 @@ TEST_P(SqueezeBoundTest, propagate_label_and_dynamic_value) { set_shape_labels(labeled_shape, in_labels); constexpr auto et = element::i64; - const auto labeled_param = std::make_shared(et, labeled_shape); - const auto labeled_shape_of = std::make_shared(labeled_param); + const auto labeled_param = std::make_shared(et, labeled_shape); + const auto labeled_shape_of = std::make_shared(labeled_param); const auto zero = std::vector{0}; const auto axis 
= std::make_shared(et, Shape{}, zero); diff --git a/src/core/tests/type_prop/strided_slice.cpp b/src/core/tests/type_prop/strided_slice.cpp index 58b30bc03818b4..c0c6c31c963a49 100644 --- a/src/core/tests/type_prop/strided_slice.cpp +++ b/src/core/tests/type_prop/strided_slice.cpp @@ -3,23 +3,24 @@ // #include -#include #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gmock/gmock.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/core/except.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/opsets/opset9.hpp" +#include "strided_slice_shape_inference.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; TEST(type_prop, strided_slice_begin_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::f16, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::f16, Shape{4}); + auto end = make_shared(element::i64, Shape{4}); try { auto strided_slice = make_shared(data, begin, @@ -36,9 +37,9 @@ TEST(type_prop, strided_slice_begin_incorrect_type) { } TEST(type_prop, strided_slice_end_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::boolean, Shape{4}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, Shape{4}); + auto end = make_shared(element::boolean, Shape{4}); try { auto strided_slice = make_shared(data, begin, @@ -55,9 +56,9 @@ TEST(type_prop, strided_slice_end_incorrect_type) { } TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, Shape{4}); + auto end = make_shared(element::i64, Shape{4}); try { auto strided_slice = make_shared(data, begin, @@ -75,9 +76,9 @@ TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) { } TEST(type_prop, strided_slice_mask_incorrect_value) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, Shape{4, 5}); + auto end = make_shared(element::i64, Shape{4}); try { auto strided_slice = make_shared(data, begin, @@ -94,9 +95,9 @@ TEST(type_prop, strided_slice_mask_incorrect_value) { } TEST(type_prop, strided_slice_begin_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, Shape{4, 5}); + auto end = make_shared(element::i64, Shape{4}); OV_EXPECT_THROW(auto strided_slice = make_shared(data, begin, @@ -108,9 +109,9 @@ TEST(type_prop, strided_slice_begin_incorrect_shape) { } TEST(type_prop, strided_slice_end_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = 
make_shared(element::i64, Shape{4, 5}); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, Shape{4}); + auto end = make_shared(element::i64, Shape{4, 5}); OV_EXPECT_THROW(auto strided_slice = make_shared(data, begin, @@ -122,23 +123,23 @@ TEST(type_prop, strided_slice_end_incorrect_shape) { } TEST(type_prop, strided_slice_default_stride_dynamic_shape_input_begin_not_1d) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, PartialShape::dynamic()); - const auto end = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::i64, PartialShape::dynamic()); + const auto end = make_shared(element::i64, PartialShape::dynamic()); OV_EXPECT_THROW( const auto strided_slice = make_shared(data, begin, end, vector{0, 0}, vector{0, 0}), - CheckFailure, + AssertFailure, HasSubstr("Begin input must be 1D")); } TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) { auto shape = PartialShape{2, 4, 6, 8}; set_shape_labels(shape, 11); - auto data = make_shared(element::f32, shape); - auto begin = make_shared(element::i64, PartialShape::dynamic()); - auto end = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::f32, shape); + auto begin = make_shared(element::i64, PartialShape::dynamic()); + auto end = make_shared(element::i64, Shape{2}); auto strided_slice = make_shared(data, begin, end, vector{0, 0}, vector{0, 0}); @@ -151,10 +152,10 @@ TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) { TEST(type_prop, strided_slice_reverse_out_of_bounds_on_dims_0_1) { auto shape = PartialShape{3, 4, 5}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{2}, {10, 2}); - auto end = op::Constant::create(element::i64, Shape{2}, {-10, -10}); - auto stride = op::Constant::create(element::i64, Shape{2}, {-1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{2}, {10, 2}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{2}, {-10, -10}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{2}, {-1}); auto mask = std::vector(shape.size(), 0); @@ -167,10 +168,10 @@ TEST(type_prop, strided_slice_reverse_out_of_bounds_on_dims_0_1) { TEST(type_prop, strided_slice_ignore_begin_mask_stride_pos_1) { auto shape = PartialShape{4, 4, 4, 4, 4, 4, 4, 4}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, 10}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, 10}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1}); auto begin_mask = std::vector(shape.size(), 1); auto end_mask = std::vector(shape.size(), 0); @@ -186,10 +187,10 @@ TEST(type_prop, strided_slice_ignore_begin_mask_stride_pos_1) { TEST(type_prop, strided_slice_ignore_begin_mask_stride_neg_1) { auto shape = PartialShape{4, 
4, 4, 4, 4, 4, 4, 4}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -1, -1, -1, -1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -1, -1, -1, -1}); auto begin_mask = std::vector(shape.size(), 1); auto end_mask = std::vector(shape.size(), 0); @@ -211,10 +212,10 @@ TEST(type_prop, strided_slice_ignore_begin_mask_stride_neg_1) { TEST(type_prop, strided_slice_ignore_end_mask_stride_pos_1) { auto shape = PartialShape{4, 4, 4, 4, 4, 4, 4, 4}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1}); auto begin_mask = std::vector(shape.size(), 0); auto end_mask = std::vector(shape.size(), 1); @@ -229,10 +230,10 @@ TEST(type_prop, strided_slice_ignore_end_mask_stride_pos_1) { TEST(type_prop, strided_slice_ignore_end_mask_stride_neg_1) { auto shape = PartialShape{4, 4, 4, 4, 4, 4, 4, 4}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -1, -1, -1, -1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -1, -1, -1, -1}); auto begin_mask = std::vector(shape.size(), 0); auto end_mask = std::vector(shape.size(), 1); @@ -247,10 +248,10 @@ TEST(type_prop, strided_slice_ignore_end_mask_stride_neg_1) { TEST(type_prop, strided_slice_ignore_begin_end_masks_variadic_stride) { auto shape = PartialShape{4, 4, 4, 4, 4, 4, 4, 4}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {1, 
2, 3, 10, -1, -2, -3, -10}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 2, 4, 10, -1, -2, -4, -10}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 3, 4, 5, -1, -3, -4, -5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 2, 3, 10, -1, -2, -3, -10}); auto mask = std::vector(shape.size(), 1); @@ -265,10 +266,10 @@ TEST(type_prop, strided_slice_ignore_begin_end_masks_variadic_stride) { TEST(type_prop, strided_slice_end_over_dimension_size) { auto shape = PartialShape{3, 3, 3, 3, 3, 3, 3, 3}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {0, 0, 0, 0, 1, 1, 1, 1}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 2, 3, 4, 1, 2, 3, 4}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {0, 0, 0, 0, 1, 1, 1, 1}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 2, 3, 4, 1, 2, 3, 4}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1}); auto mask = std::vector(shape.size(), 0); @@ -283,10 +284,10 @@ TEST(type_prop, strided_slice_end_over_dimension_size) { TEST(type_prop, strided_slice_begin_over_dimension_size) { auto shape = PartialShape{3, 3, 3, 3, 3, 3, 3, 3}; set_shape_labels(shape, 10); - auto data = std::make_shared(element::f32, shape); - auto begin = op::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -2, -2, -2, -2}); - auto end = op::Constant::create(element::i64, Shape{shape.size()}, {1, 2, 3, 4, 1, 2, 3, 4}); - auto stride = op::Constant::create(element::i64, Shape{shape.size()}, {1}); + auto data = std::make_shared(element::f32, shape); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {-1, -1, -1, -1, -2, -2, -2, -2}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1, 2, 3, 4, 1, 2, 3, 4}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{shape.size()}, {1}); auto mask = std::vector(shape.size(), 0); @@ -299,12 +300,12 @@ TEST(type_prop, strided_slice_begin_over_dimension_size) { TEST(type_prop, strided_slice_end_is_shape_of_with_bounds) { auto shape = PartialShape{1, {5, 7}}; set_shape_labels(shape, 20); - const auto p_end = std::make_shared(element::i64, shape); - const auto shape_of_end = std::make_shared(p_end); + const auto p_end = std::make_shared(element::i64, shape); + const auto shape_of_end = std::make_shared(p_end); - auto data = op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto begin = op::Constant::create(element::i64, Shape{2}, {0, 0}); - auto stride = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, 0}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto mask = std::vector(2, 0); @@ -317,12 +318,12 @@ TEST(type_prop, strided_slice_end_is_shape_of_with_bounds) { TEST(type_prop, strided_slice_begin_is_shape_of_with_bounds) { auto shape = PartialShape{0, {3, 5}}; set_shape_labels(shape, 20); - const auto p_begin = 
std::make_shared(element::i64, shape); - const auto shape_of_begin = std::make_shared(p_begin); + const auto p_begin = std::make_shared(element::i64, shape); + const auto shape_of_begin = std::make_shared(p_begin); - auto data = op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto end = op::Constant::create(element::i64, Shape{2}, {1, 7}); - auto stride = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 7}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto mask = std::vector(2, 0); @@ -337,13 +338,13 @@ TEST(type_prop, strided_slice_begin_end_is_shape_of_with_bounds) { auto end_shape = PartialShape{2, {6, 7}}; set_shape_labels(begin_shape, 10); set_shape_labels(end_shape, 20); - const auto p_begin = std::make_shared(element::i64, begin_shape); - const auto p_end = std::make_shared(element::i64, end_shape); - const auto shape_of_begin = std::make_shared(p_begin); - const auto shape_of_end = std::make_shared(p_end); + const auto p_begin = std::make_shared(element::i64, begin_shape); + const auto p_end = std::make_shared(element::i64, end_shape); + const auto shape_of_begin = std::make_shared(p_begin); + const auto shape_of_end = std::make_shared(p_end); - auto data = op::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); - auto stride = op::Constant::create(element::i64, Shape{2}, {1, 1}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{1, 10}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 1}); auto mask = std::vector(2, 0); @@ -354,11 +355,11 @@ TEST(type_prop, strided_slice_begin_end_is_shape_of_with_bounds) { } TEST(type_prop, strided_slice_out_of_bounds_different_stride) { - auto data = std::make_shared(element::f32, PartialShape{5, 5, 5, 5, 5}); + auto data = std::make_shared(element::f32, PartialShape{5, 5, 5, 5, 5}); const auto data_rank_size = data->get_partial_shape().size(); - auto begin = op::Constant::create(element::i64, Shape{data_rank_size}, {5, 5, 5, 5, 5}); - auto end = op::Constant::create(element::i64, Shape{data_rank_size}, {5, 5, 5, 5, 5}); - auto stride = op::Constant::create(element::i64, Shape{data_rank_size}, {1, 2, 5, -2, -5}); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, {5, 5, 5, 5, 5}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, {5, 5, 5, 5, 5}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, {1, 2, 5, -2, -5}); const auto mask = std::vector(data_rank_size, 0); @@ -368,11 +369,12 @@ TEST(type_prop, strided_slice_out_of_bounds_different_stride) { } TEST(type_prop, strided_slice_reverse_end_is_int64_min) { - auto data = std::make_shared(element::f32, PartialShape{{0, 20}, -1}); + auto data = std::make_shared(element::f32, PartialShape{{0, 20}, -1}); const auto data_rank_size = data->get_partial_shape().size(); - auto begin = op::Constant::create(element::i64, Shape{data_rank_size}, {20, 20}); - auto end = op::Constant::create(element::i64, Shape{data_rank_size}, std::vector{INT64_MIN, INT64_MIN}); - auto stride = op::Constant::create(element::i64, Shape{data_rank_size}, std::vector{-1, -1}); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, {20, 20}); + auto end = + 
ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, std::vector{INT64_MIN, INT64_MIN}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{data_rank_size}, std::vector{-1, -1}); const auto mask = std::vector(data_rank_size, 0); @@ -387,9 +389,9 @@ TEST(type_prop, strided_slice_dynamic_value_and_label_propagation) { ov::DimensionTracker::set_label(marked_0, 10); PartialShape target_0 = PartialShape{marked_0, 4}; - auto param = std::make_shared(element::f32, Shape{1}); - auto param_0 = std::make_shared(element::f32, target_0); - auto shape_0 = std::make_shared(param_0); + auto param = std::make_shared(element::f32, Shape{1}); + auto param_0 = std::make_shared(element::f32, target_0); + auto shape_0 = std::make_shared(param_0); constexpr auto et = element::i64; std::vector start_val{0}, stop_val{1}, step_val{1}; @@ -413,10 +415,10 @@ TEST(type_prop, strided_slice_dynamic_value_and_label_propagation) { TEST(type_prop, strided_slice_use_default_ctor) { const auto zero_mask = std::vector(3, 0); - auto data = std::make_shared(element::f32, PartialShape{10, 11, 12}); - auto begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); - auto end = op::Constant::create(element::i64, Shape{4}, {1, 5, 20, 20}); - auto stride = op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}); + auto data = std::make_shared(element::f32, PartialShape{10, 11, 12}); + auto begin = ov::op::v0::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); + auto end = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 5, 20, 20}); + auto stride = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}); auto slice = std::make_shared(); slice->set_begin_mask(zero_mask); @@ -431,10 +433,10 @@ TEST(type_prop, strided_slice_use_default_ctor) { } TEST(type_prop, strided_slice_inf_dim_start_from_last_N_to_end) { - auto data = std::make_shared(element::f32, PartialShape{1, 256, -1}); - auto start = op::Constant::create(element::i64, Shape{3}, {0, 0, -7}); - auto stop = op::Constant::create(element::i64, Shape{3}, std::vector{0, 0, INT64_MAX}); - auto step = op::Constant::create(element::i64, Shape{3}, {1, 1, 1}); + auto data = std::make_shared(element::f32, PartialShape{1, 256, -1}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{3}, {0, 0, -7}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{3}, std::vector{0, 0, INT64_MAX}); + auto step = ov::op::v0::Constant::create(element::i64, Shape{3}, {1, 1, 1}); const auto slice = std::make_shared(data, start, @@ -447,9 +449,9 @@ TEST(type_prop, strided_slice_inf_dim_start_from_last_N_to_end) { } TEST(type_prop, strided_slice_different_ranks) { - auto data = std::make_shared(element::f32, PartialShape{1, 2, 3, 4}); - auto start = op::Constant::create(element::i64, Shape{1}, {0}); - auto stop = op::Constant::create(element::i64, Shape{1}, std::vector{INT64_MAX}); + auto data = std::make_shared(element::f32, PartialShape{1, 2, 3, 4}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{1}, {0}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{1}, std::vector{INT64_MAX}); const auto slice = std::make_shared(data, start, @@ -461,9 +463,9 @@ TEST(type_prop, strided_slice_different_ranks) { } TEST(type_prop, strided_slice_different_ranks_long_masks) { - auto data = std::make_shared(element::f32, PartialShape{1, 2, 3, 4}); - auto start = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); - auto stop = op::Constant::create(element::i64, Shape{4}, std::vector{2, 2, 2, 2}); + 
auto data = std::make_shared(element::f32, PartialShape{1, 2, 3, 4}); + auto start = ov::op::v0::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); + auto stop = ov::op::v0::Constant::create(element::i64, Shape{4}, std::vector{2, 2, 2, 2}); const auto slice = std::make_shared(data, start, diff --git a/src/core/tests/type_prop/swish.cpp b/src/core/tests/type_prop/swish.cpp index b9d6e687618dac..fe94d4ef2130f3 100644 --- a/src/core/tests/type_prop/swish.cpp +++ b/src/core/tests/type_prop/swish.cpp @@ -2,33 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/swish.hpp" + #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" using namespace std; -using namespace ngraph; +using namespace ov; TEST(type_prop, swish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); auto swish_func = make_shared(data); EXPECT_EQ(swish_func->get_element_type(), element::f32); EXPECT_EQ(swish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, swish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); EXPECT_EQ(swish_func->get_element_type(), element::f32); ASSERT_TRUE(swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown - auto swish_partial = make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto swish_partial = + make_shared(make_shared(element::f32, PartialShape::dynamic())); ASSERT_TRUE(swish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, swish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); EXPECT_EQ(swish_func->get_element_type(), element::f32); ASSERT_TRUE(swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); @@ -36,8 +37,8 @@ TEST(type_prop, swish_partial_static_rank) { } TEST(type_prop, swish_incompatible_types) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f16, Shape{}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::f16, Shape{}); try { const auto swish_func = make_shared(data, beta); FAIL() << "swish_func node was created with incompatible input data types."; @@ -47,8 +48,8 @@ TEST(type_prop, swish_incompatible_types) { } TEST(type_prop, swish_beta_not_scalar) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{1}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::f32, Shape{1}); try { const auto swish_func = make_shared(data, beta); FAIL() << "swish_func node was created with scalar beta value."; @@ -58,8 +59,8 @@ TEST(type_prop, swish_beta_not_scalar) { } TEST(type_prop, swish_2_inputs) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{}); + auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::f32, Shape{}); const auto swish_func = make_shared(data, beta); EXPECT_EQ(swish_func->get_element_type(), element::f32); @@ -68,13 +69,13 @@ TEST(type_prop, swish_2_inputs) { } TEST(type_prop, 
swish_incompatible_type_boolean) {
- auto data = make_shared(element::boolean, Shape{1, 3, 6});
- auto beta = make_shared(element::f32, Shape{});
- ASSERT_THROW(const auto unused = make_shared(data, beta);, ngraph::NodeValidationFailure);
+ auto data = make_shared(element::boolean, Shape{1, 3, 6});
+ auto beta = make_shared(element::f32, Shape{});
+ ASSERT_THROW(const auto unused = make_shared(data, beta);, ov::NodeValidationFailure);
}
TEST(type_prop, swish_incompatible_types_u32) {
- auto data = make_shared(element::f32, Shape{1, 3, 6});
- auto beta = make_shared(element::u32, Shape{});
- ASSERT_THROW(const auto unused = make_shared(data, beta);, ngraph::NodeValidationFailure);
+ auto data = make_shared(element::f32, Shape{1, 3, 6});
+ auto beta = make_shared(element::u32, Shape{});
+ ASSERT_THROW(const auto unused = make_shared(data, beta);, ov::NodeValidationFailure);
}
diff --git a/src/core/tests/type_prop/tan.cpp b/src/core/tests/type_prop/tan.cpp
index 6f77a38c4345c7..c25281e5054d31 100644
--- a/src/core/tests/type_prop/tan.cpp
+++ b/src/core/tests/type_prop/tan.cpp
@@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "openvino/op/tan.hpp"
+
#include "unary_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_tan, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/tanh.cpp b/src/core/tests/type_prop/tanh.cpp
index 3ad2e6f894599d..1e7f1c076f420f 100644
--- a/src/core/tests/type_prop/tanh.cpp
+++ b/src/core/tests/type_prop/tanh.cpp
@@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "openvino/op/tanh.hpp"
+
#include "unary_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_tanh, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/tensor_iterator.cpp b/src/core/tests/type_prop/tensor_iterator.cpp
index a95aa24df620f0..bab7c4265c671d 100644
--- a/src/core/tests/type_prop/tensor_iterator.cpp
+++ b/src/core/tests/type_prop/tensor_iterator.cpp
@@ -2,16 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "openvino/op/tensor_iterator.hpp"
+
#include
#include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
#include "ngraph/builder/reshape.hpp"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/opsets/opset5.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/opsets/opset5.hpp"
using namespace std;
-using namespace ngraph;
+using namespace ov;
TEST(type_prop, tensor_iterator_lstm) {
// That which we iterate over
@@ -19,31 +20,31 @@ TEST(type_prop, tensor_iterator_lstm) {
const size_t L = 10; // Sequence length
const size_t I = 8; // Input size
const size_t H = 32; // Hidden size
- auto SENT = make_shared(element::f32, Shape{N, L, I});
+ auto SENT = make_shared(element::f32, Shape{N, L, I});
- auto H_init = make_shared(element::f32, Shape{N, 1, H});
- auto C_init = make_shared(element::f32, Shape{N, 1, H});
+ auto H_init = make_shared(element::f32, Shape{N, 1, H});
+ auto C_init = make_shared(element::f32, Shape{N, 1, H});
- auto W = make_shared(element::f32, Shape{4 * H, I});
- auto R = make_shared(element::f32, Shape{4 * H, H});
- auto H_t = make_shared(element::f32, Shape{N, 1, H});
- auto C_t = make_shared(element::f32, Shape{N, 1, H});
+ auto W = make_shared(element::f32, Shape{4 * H, I});
+ auto R = make_shared(element::f32, Shape{4 * H, H});
+ auto H_t = make_shared(element::f32, Shape{N, 1, H});
+ auto C_t = make_shared(element::f32, Shape{N, 1, H});
// Body
- auto X =
make_shared(element::f32, Shape{N, 1, I}); - auto W_body = make_shared(element::f32, Shape{4 * H, I}); - auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(builder::opset1::reshape(X, Shape{N, I}), - builder::opset1::reshape(H_t, Shape{N, H}), - builder::opset1::reshape(C_t, Shape{N, H}), + auto X = make_shared(element::f32, Shape{N, 1, I}); + auto W_body = make_shared(element::f32, Shape{4 * H, I}); + auto R_body = make_shared(element::f32, Shape{4 * H, H}); + auto LSTM_cell = make_shared(ngraph::builder::opset1::reshape(X, Shape{N, I}), + ngraph::builder::opset1::reshape(H_t, Shape{N, H}), + ngraph::builder::opset1::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); - auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); + auto H_o = ngraph::builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ngraph::builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); - auto tensor_iterator = make_shared(); + auto tensor_iterator = make_shared(); tensor_iterator->set_body(body); // start=0, stride=1, part_size=1, end=39, axis=1 tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1); @@ -58,27 +59,27 @@ TEST(type_prop, tensor_iterator_lstm) { // Output 1 is last Co, result 1 of body auto out1 = tensor_iterator->get_iter_value(C_o, -1); - auto results = ResultVector{make_shared(out0), make_shared(out1)}; - auto f = make_shared(results, ParameterVector{SENT, H_init, C_init, W, R}); + auto results = ResultVector{make_shared(out0), make_shared(out1)}; + auto f = make_shared(results, ParameterVector{SENT, H_init, C_init, W, R}); } TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto M = make_shared(element::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, Shape{32, 2, 10}); - auto Yi = make_shared(element::f32, Shape{32, 2, 10}); - auto M_body = make_shared(element::f32, Shape{32, 2, 10}); + auto Xi = make_shared(element::f32, Shape{32, 2, 10}); + auto Yi = make_shared(element::f32, Shape{32, 2, 10}); + auto M_body = make_shared(element::f32, Shape{32, 2, 10}); // Body auto Zo = std::make_shared(std::make_shared(Xi, Yi), M_body); - auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); + auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); - auto tensor_iterator = make_shared(); + auto tensor_iterator = make_shared(); tensor_iterator->set_body(body); // The Xi are the elements of Xseq // start=0, stride=2, part_size=2, end=39, axis=1 @@ -93,34 +94,34 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) { // Output 1 is concat of Zos // start=0, stride=2, part_size=2, end=39, axis=1 auto out1 = tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 39, 1); - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = 
make_shared(out1); Shape out0_shape{32, 2, 10}; Shape out1_shape{32, 40, 10}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); } TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::f32, Shape{32, 40, 10}); + auto M = make_shared(element::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto Xi = make_shared(element::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::f32, PartialShape::dynamic()); // Body auto Zo = std::make_shared(std::make_shared(Xi, Yi), M_body); - auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); + auto body = make_shared(OutputVector{Zo}, ParameterVector{Xi, Yi, M_body}); - auto tensor_iterator = make_shared(); + auto tensor_iterator = make_shared(); tensor_iterator->set_body(body); // The Xi are the elements of Xseq // start=0, stride=2, part_size=2, end=38, axis=1 @@ -134,13 +135,13 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) { for (auto& desc : tensor_iterator->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = ov::as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -155,20 +156,20 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) { for (auto& desc : tensor_iterator->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = ov::as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } - auto result0 = make_shared(out0); - auto result1 = make_shared(out1); + auto result0 = make_shared(out0); + auto result1 = make_shared(out1); Shape out0_shape{32, 2, 10}; Shape out1_shape{32, 38, 10}; auto results = ResultVector{result0, result1}; - auto f = make_shared(results, ParameterVector{X, Y, M}); + auto f = make_shared(results, ParameterVector{X, Y, M}); EXPECT_EQ(result0->get_output_shape(0), out0_shape); EXPECT_EQ(result1->get_output_shape(0), out1_shape); @@ -182,31 +183,31 @@ TEST(type_prop, 
tensor_iterator_with_dynamic_reshape) { const size_t I = 8; // Input size const size_t H = 32; // Hidden size - auto SENT = make_shared(element::f32, Shape{N, L, I}); + auto SENT = make_shared(element::f32, Shape{N, L, I}); - auto H_init = make_shared(element::f32, Shape{N, 1, H}); - auto C_init = make_shared(element::f32, Shape{N, 1, H}); + auto H_init = make_shared(element::f32, Shape{N, 1, H}); + auto C_init = make_shared(element::f32, Shape{N, 1, H}); - auto W = make_shared(element::f32, Shape{4 * H, I}); - auto R = make_shared(element::f32, Shape{4 * H, H}); - auto H_t = make_shared(element::f32, Shape{N, 1, H}); - auto C_t = make_shared(element::f32, Shape{N, 1, H}); + auto W = make_shared(element::f32, Shape{4 * H, I}); + auto R = make_shared(element::f32, Shape{4 * H, H}); + auto H_t = make_shared(element::f32, Shape{N, 1, H}); + auto C_t = make_shared(element::f32, Shape{N, 1, H}); // Body - auto X = make_shared(element::f32, Shape{N, 1, I}); - auto W_body = make_shared(element::f32, Shape{4 * H, I}); - auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(builder::opset1::reshape(X, Shape{N, I}), - builder::opset1::reshape(H_t, Shape{N, H}), - builder::opset1::reshape(C_t, Shape{N, H}), + auto X = make_shared(element::f32, Shape{N, 1, I}); + auto W_body = make_shared(element::f32, Shape{4 * H, I}); + auto R_body = make_shared(element::f32, Shape{4 * H, H}); + auto LSTM_cell = make_shared(ngraph::builder::opset1::reshape(X, Shape{N, I}), + ngraph::builder::opset1::reshape(H_t, Shape{N, H}), + ngraph::builder::opset1::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); - auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); + auto H_o = ngraph::builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ngraph::builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); - auto tensor_iterator = make_shared(); + auto tensor_iterator = make_shared(); tensor_iterator->set_body(body); // start=0, stride=1, part_size=1, end=39, axis=1 tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1); @@ -221,8 +222,8 @@ TEST(type_prop, tensor_iterator_with_dynamic_reshape) { // Output 1 is last Co, result 1 of body auto out1 = tensor_iterator->get_iter_value(C_o, -1); - auto results = ResultVector{make_shared(out0), make_shared(out1)}; - auto f = make_shared(results, ParameterVector{SENT, H_init, C_init, W, R}); + auto results = ResultVector{make_shared(out0), make_shared(out1)}; + auto f = make_shared(results, ParameterVector{SENT, H_init, C_init, W, R}); ASSERT_EQ(tensor_iterator->get_num_iterations(), 10); std::map, ov::PartialShape> dyn; @@ -238,13 +239,13 @@ TEST(type_prop, tensor_iterator_dyn_slice) { const size_t I = 8; // Input size ov::PartialShape ps = {N, ov::Dimension::dynamic(), I}; - auto SENT = make_shared(element::f32, ps); + auto SENT = make_shared(element::f32, ps); // Body - auto X = make_shared(element::f32, PartialShape::dynamic()); - auto Res = make_shared(X); + auto X = make_shared(element::f32, PartialShape::dynamic()); + auto Res = make_shared(X); auto body = make_shared(Res, ParameterVector{X}); - auto tensor_iterator = make_shared(); + auto tensor_iterator = make_shared(); tensor_iterator->set_body(body); // start=0, stride=1, 
part_size=1, end=39, axis=1 @@ -254,7 +255,7 @@ TEST(type_prop, tensor_iterator_dyn_slice) { // Output 0 is last Ho, result 0 of body auto out0 = tensor_iterator->get_iter_value(Res, -1); - auto results = ResultVector{make_shared(out0)}; + auto results = ResultVector{make_shared(out0)}; auto model = make_shared(results, ParameterVector{SENT}); EXPECT_EQ(tensor_iterator->get_num_iterations(), -1); diff --git a/src/core/tests/type_prop/tile.cpp b/src/core/tests/type_prop/tile.cpp index d7ff07cedf2a92..0419b8254873fb 100644 --- a/src/core/tests/type_prop/tile.cpp +++ b/src/core/tests/type_prop/tile.cpp @@ -2,14 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/tile.hpp" + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; class TypePropTileTest : public TypePropOpTest { @@ -18,8 +21,8 @@ class TypePropTileTest : public TypePropOpTest { }; TEST_F(TypePropTileTest, exception_if_repeats_are_float) { - const auto data = make_shared(element::f64, Shape{2, 3, 4}); - const auto repeats = op::Constant::create(element::f32, Shape{3}, {3, 2, 1}); + const auto data = make_shared(element::f64, Shape{2, 3, 4}); + const auto repeats = ov::op::v0::Constant::create(element::f32, Shape{3}, {3, 2, 1}); OV_EXPECT_THROW(auto op = make_op(data, repeats), NodeValidationFailure, @@ -27,8 +30,8 @@ TEST_F(TypePropTileTest, exception_if_repeats_are_float) { } TEST_F(TypePropTileTest, exception_if_repeats_shape_is_not_rank_1) { - const auto data = make_shared(element::f64, Shape{2, 3, 4}); - const auto repeats = op::Constant::create(element::i16, Shape{3, 1}, {3, 2, 1}); + const auto data = make_shared(element::f64, Shape{2, 3, 4}); + const auto repeats = ov::op::v0::Constant::create(element::i16, Shape{3, 1}, {3, 2, 1}); OV_EXPECT_THROW(auto op = make_op(data, repeats), NodeValidationFailure, @@ -36,8 +39,8 @@ TEST_F(TypePropTileTest, exception_if_repeats_shape_is_not_rank_1) { } TEST_F(TypePropTileTest, repeats_has_negative_values) { - const auto data = make_shared(element::i32, PartialShape{-1, 3, 4, {-1, 5}, {4, -1}}); - const auto repeats = op::Constant::create(element::i8, Shape{5}, {-1, -2, 1, -1, -1}); + const auto data = make_shared(element::i32, PartialShape{-1, 3, 4, {-1, 5}, {4, -1}}); + const auto repeats = ov::op::v0::Constant::create(element::i8, Shape{5}, {-1, -2, 1, -1, -1}); auto op = make_op(data, repeats); EXPECT_EQ(op->get_element_type(), element::i32); @@ -45,8 +48,8 @@ TEST_F(TypePropTileTest, repeats_has_negative_values) { } TEST_F(TypePropTileTest, repeats_are_undefined_and_its_rank_lt_data_rank) { - const auto data = make_shared(element::f32, Shape{6, 8, 10}); - const auto repeats = make_shared(element::i32, Shape{2}); + const auto data = make_shared(element::f32, Shape{6, 8, 10}); + const auto repeats = make_shared(element::i32, Shape{2}); const auto op = make_op(data, repeats); @@ -55,8 +58,8 @@ TEST_F(TypePropTileTest, repeats_are_undefined_and_its_rank_lt_data_rank) { } TEST_F(TypePropTileTest, repeats_are_undefined_and_its_rank_gt_data_rank) { - const auto data = make_shared(element::f32, Shape{6, 8, 10}); - const auto repeats = make_shared(element::i32, Shape{5}); + const auto data = make_shared(element::f32, Shape{6, 8, 
10}); + const auto repeats = make_shared(element::i32, Shape{5}); const auto op = make_op(data, repeats); @@ -65,8 +68,8 @@ TEST_F(TypePropTileTest, repeats_are_undefined_and_its_rank_gt_data_rank) { } TEST_F(TypePropTileTest, data_dynamic_rank_repeats_are_undefined) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto repeats = make_shared(element::i32, Shape{5}); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto repeats = make_shared(element::i32, Shape{5}); const auto op = make_op(data, repeats); @@ -75,8 +78,8 @@ TEST_F(TypePropTileTest, data_dynamic_rank_repeats_are_undefined) { } TEST_F(TypePropTileTest, data_and_repeats_are_dynamic_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto repeats = make_shared(element::i32, PartialShape::dynamic()); + const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto repeats = make_shared(element::i32, PartialShape::dynamic()); const auto op = make_op(data, repeats); @@ -89,13 +92,13 @@ TEST_F(TypePropTileTest, propagate_label_and_dynamic_value_no_repeats) { set_shape_labels(p_shape, 1); constexpr auto et = element::i64; - const auto labeled_param = std::make_shared(et, p_shape); - const auto labeled_shape_of = std::make_shared(labeled_param); + const auto labeled_param = std::make_shared(et, p_shape); + const auto labeled_shape_of = std::make_shared(labeled_param); - const auto repeats = op::Constant::create(element::i32, Shape{1}, {1}); + const auto repeats = ov::op::v0::Constant::create(element::i32, Shape{1}, {1}); const auto op = make_op(labeled_shape_of, repeats); const auto bc = - std::make_shared(std::make_shared(ov::element::i32, PartialShape{1}), + std::make_shared(std::make_shared(ov::element::i32, PartialShape{1}), op, "BIDIRECTIONAL"); @@ -109,13 +112,13 @@ TEST_F(TypePropTileTest, propagate_label_and_dynamic_value) { set_shape_labels(p_shape, 1); constexpr auto et = element::i64; - const auto labeled_param = std::make_shared(et, p_shape); - const auto labeled_shape_of = std::make_shared(labeled_param); + const auto labeled_param = std::make_shared(et, p_shape); + const auto labeled_shape_of = std::make_shared(labeled_param); - const auto repeats = op::Constant::create(element::i32, Shape{1}, {2}); + const auto repeats = ov::op::v0::Constant::create(element::i32, Shape{1}, {2}); const auto op = make_op(labeled_shape_of, repeats); const auto bc = - std::make_shared(std::make_shared(ov::element::i32, PartialShape{1}), + std::make_shared(std::make_shared(ov::element::i32, PartialShape{1}), op, "BIDIRECTIONAL"); @@ -127,10 +130,10 @@ TEST_F(TypePropTileTest, propagate_label_and_dynamic_value) { TEST_F(TypePropTileTest, preserve_partial_values_and_labels) { auto shape = PartialShape{1, {1, 2}, {-1, 3}, {2, -1}, -1}; set_shape_labels(shape, 20); - const auto p_repeats = std::make_shared(element::i64, shape); - const auto shape_of_repeats = std::make_shared(p_repeats); + const auto p_repeats = std::make_shared(element::i64, shape); + const auto shape_of_repeats = std::make_shared(p_repeats); - auto data = op::Constant::create(element::i64, Shape{2, 2, 2, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8}); + auto data = ov::op::v0::Constant::create(element::i64, Shape{2, 2, 2, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8}); const auto op = make_op(data, shape_of_repeats); @@ -140,8 +143,8 @@ TEST_F(TypePropTileTest, preserve_partial_values_and_labels) { } TEST_F(TypePropTileTest, repeats_has_dynamic_shape) { - const auto data = 
make_shared(element::f32, PartialShape{1, 3, 10, 2, 5});
-    const auto repeats = make_shared(element::i32, PartialShape::dynamic());
+    const auto data = make_shared(element::f32, PartialShape{1, 3, 10, 2, 5});
+    const auto repeats = make_shared(element::i32, PartialShape::dynamic());
     const auto op = make_op(data, repeats);
@@ -149,8 +152,8 @@ TEST_F(TypePropTileTest, repeats_has_dynamic_shape) {
 }
 TEST_F(TypePropTileTest, repeats_has_interval_shape) {
-    const auto data = make_shared(element::f32, PartialShape{1, 3, 10, 2, 5});
-    const auto repeats = make_shared(element::i32, PartialShape{{3, 10}});
+    const auto data = make_shared(element::f32, PartialShape{1, 3, 10, 2, 5});
+    const auto repeats = make_shared(element::i32, PartialShape{{3, 10}});
     const auto op = make_op(data, repeats);
@@ -222,8 +225,8 @@ INSTANTIATE_TEST_SUITE_P(
 TEST_P(TileTest, default_ctor) {
     constexpr auto dt = element::f16;
-    const auto data = make_shared(dt, shape_in);
-    const auto repeats = op::Constant::create(element::i64, Shape{repeats_val.size()}, repeats_val);
+    const auto data = make_shared(dt, shape_in);
+    const auto repeats = ov::op::v0::Constant::create(element::i64, Shape{repeats_val.size()}, repeats_val);
     const auto op = make_op();
     op->set_arguments(OutputVector{data, repeats});
@@ -237,8 +240,8 @@ TEST_P(TileTest, propagate_shapes_and_labels) {
     ASSERT_TRUE(shape_in.rank().is_static()) << "Cannot test labels propagation for dynamic rank.";
     constexpr auto dt = element::f32;
-    const auto data = make_shared(dt, shape_in);
-    const auto repeats = op::Constant::create(element::i64, Shape{repeats_val.size()}, repeats_val);
+    const auto data = make_shared(dt, shape_in);
+    const auto repeats = ov::op::v0::Constant::create(element::i64, Shape{repeats_val.size()}, repeats_val);
     const auto op = make_op(data, repeats);
diff --git a/src/core/tests/type_prop/transpose.cpp b/src/core/tests/type_prop/transpose.cpp
index 6dc63acd044cc5..e8ac81925a4a7e 100644
--- a/src/core/tests/type_prop/transpose.cpp
+++ b/src/core/tests/type_prop/transpose.cpp
@@ -2,42 +2,49 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/transpose.hpp"
+
 #include "common_test_utils/type_prop.hpp"
-#include "ngraph/ngraph.hpp"
 #include "openvino/core/dimension_tracker.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/gather.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "sequnce_generator.hpp"
 using namespace std;
-using namespace ngraph;
+using namespace ov;
 using namespace testing;
 using namespace ov::op;
 TEST(type_prop, transpose_arg_static_input_order_static_ok) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = make_shared(element::i64, Shape{4});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = make_shared(element::i64, Shape{4});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
 }
 TEST(type_prop, transpose_arg_static_input_order_constant_ok) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 1, 0, 3});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = ov::op::v0::Constant::create(element::i64, Shape{4}, vector{2, 1, 0, 3});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), (PartialShape{6, 4, 2, 8}));
 }
 TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 9, 0, 3});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = ov::op::v0::Constant::create(element::i64, Shape{4}, vector{2, 9, 0, 3});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect invalid permutation";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
@@ -49,11 +56,11 @@ TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) {
 TEST(type_prop, transpose_with_not_unique_order) {
     const auto order = ov::TensorLabel{1, 0, 1};
-    auto arg = make_shared(element::f32, Shape{1, 4, 300});
-    auto input_order = make_shared(element::i64, Shape{order.size()}, order);
+    auto arg = make_shared(element::f32, Shape{1, 4, 300});
+    auto input_order = make_shared(element::i64, Shape{order.size()}, order);
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect invalid permutation";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Permutation AxisVector{1, 0, 1} is not valid for input shape"));
@@ -63,60 +70,63 @@
 }
 TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_ok) {
-    auto arg = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
-    auto input_order = make_shared(element::i64, Shape{4});
+    auto arg = make_shared(element::f32,
+                           PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
+    auto input_order = make_shared(element::i64, Shape{4});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
 }
 TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_ok) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
 }
 TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_ok) {
-    auto arg = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
-    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
+    auto arg = make_shared(element::f32,
+                           PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
+    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
 }
 TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_ok) {
-    auto arg = make_shared(element::f32, PartialShape::dynamic());
-    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
+    auto arg = make_shared(element::f32, PartialShape::dynamic());
+    auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic());
 }
 TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_dynamic_ok) {
-    auto arg = make_shared(element::f32, PartialShape::dynamic());
-    auto input_order = make_shared(element::i64, PartialShape::dynamic());
+    auto arg = make_shared(element::f32, PartialShape::dynamic());
+    auto input_order = make_shared(element::i64, PartialShape::dynamic());
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic());
 }
 TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_dynamic_ok) {
-    auto arg = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
-    auto input_order = make_shared(element::i64, PartialShape::dynamic());
+    auto arg = make_shared(element::f32,
+                           PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
+    auto input_order = make_shared(element::i64, PartialShape::dynamic());
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
@@ -124,31 +134,31 @@ TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_dynamic_ok) {
 TEST(type_prop, transpose_arg_rank_dynamic_input_order_const_ok) {
     const auto axes_order = std::vector{1, 3, 0, 2};
-    auto arg = make_shared(element::f32, PartialShape::dynamic());
-    auto input_order = op::Constant::create(element::i64, Shape{axes_order.size()}, axes_order);
+    auto arg = make_shared(element::f32, PartialShape::dynamic());
+    auto input_order = ov::op::v0::Constant::create(element::i64, Shape{axes_order.size()}, axes_order);
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(axes_order.size()));
 }
 TEST(type_prop, transpose_dynamic_interval_input_data) {
-    auto arg = make_shared(element::f32, PartialShape{Dimension(4, 6), Dimension(2, 3), 8});
-    auto input_order = make_shared(element::i64, Shape{3});
+    auto arg = make_shared(element::f32, PartialShape{Dimension(4, 6), Dimension(2, 3), 8});
+    auto input_order = make_shared(element::i64, Shape{3});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(3));
 }
 TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector) {
-    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
-    auto input_order = make_shared(element::i64, PartialShape{2, 2});
+    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
+    auto input_order = make_shared(element::i64, PartialShape{2, 2});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order not vector";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
@@ -158,11 +168,11 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector)
 }
 TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order_not_vector) {
-    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
-    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
+    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
+    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order not vector";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
@@ -172,11 +182,11 @@ TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order
 }
 TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size) {
-    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
-    auto input_order = make_shared(element::i64, PartialShape{5});
+    auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8});
+    auto input_order = make_shared(element::i64, PartialShape{5});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order wrong size";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must have shape [n], where n is the rank of arg."));
@@ -186,11 +196,12 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size)
 }
 TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_input_order_not_vector) {
-    auto arg = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
-    auto input_order = make_shared(element::i64, PartialShape{2, 2});
+    auto arg = make_shared(element::f32,
+                           PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
+    auto input_order = make_shared(element::i64, PartialShape{2, 2});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order not vector";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
@@ -200,11 +211,12 @@ TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_input_order
 }
 TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_input_order_not_vector) {
-    auto arg = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
-    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
+    auto arg = make_shared(element::f32,
+                           PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8});
+    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order not vector";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
@@ -214,11 +226,11 @@ TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynami
 }
 TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input_order_not_vector) {
-    auto arg = make_shared(element::f32, PartialShape::dynamic());
-    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
+    auto arg = make_shared(element::f32, PartialShape::dynamic());
+    auto input_order = make_shared(element::i64, PartialShape{2, Dimension::dynamic()});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input order not vector";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must be a vector."));
@@ -228,21 +240,21 @@ TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input
 }
 TEST(type_prop, transpose_input_order_et_dynamic_ok) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = make_shared(element::dynamic, Shape{4});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = make_shared(element::dynamic, Shape{4});
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(4));
 }
 TEST(type_prop, transpose_input_order_et_wrong) {
-    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
-    auto input_order = make_shared(element::boolean, Shape{4});
+    auto arg = make_shared(element::f32, Shape{2, 4, 6, 8});
+    auto input_order = make_shared(element::boolean, Shape{4});
     try {
-        auto r = make_shared(arg, input_order);
+        auto r = make_shared(arg, input_order);
         FAIL() << "Did not detect input element type not i64";
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Input order must have an integral number element type."));
@@ -252,10 +264,10 @@ TEST(type_prop, transpose_input_order_et_wrong) {
 }
 TEST(type_prop, transpose_with_empty_order) {
-    auto arg = make_shared(element::f32, Shape{1, 300});
-    auto input_order = make_shared(element::i64, Shape({0}), ov::TensorLabel());
+    auto arg = make_shared(element::f32, Shape{1, 300});
+    auto input_order = make_shared(element::i64, Shape({0}), ov::TensorLabel());
-    auto r = make_shared(arg, input_order);
+    auto r = make_shared(arg, input_order);
     EXPECT_EQ(r->get_output_element_type(0), element::f32);
     EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape({300, 1})));
@@ -270,8 +282,8 @@ TEST(type_prop, transpose_order_as_parameter_shape) {
     const auto shape_of = make_shared(param);
     // order after gather [1, 2, 0]
     const auto gather = make_shared(shape_of,
-                                    op::Constant::create(element::i64, {3}, {2, 0, 1}),
-                                    op::Constant::create(element::i64, {}, {0}));
+                                    ov::op::v0::Constant::create(element::i64, {3}, {2, 0, 1}),
+                                    ov::op::v0::Constant::create(element::i64, {}, {0}));
     const auto r = make_shared(arg, gather);
@@ -285,15 +297,15 @@ TEST(type_prop, transpose_order_as_parameter_shape_after_transformation) {
     const auto param = make_shared(element::i64, PartialShape{8, 20, 1});
     const auto shape_of = make_shared(param);
-    const auto cast_fp = make_shared(shape_of, element::f32);
-    const auto mul = make_shared(cast_fp, op::Constant::create(element::f32, {3}, {-2, 1, -2}));
-    const auto div = make_shared(mul, op::Constant::create(element::f32, {3}, {-10, 41, -1}));
+    const auto cast_fp = make_shared(shape_of, element::f32);
+    const auto mul = make_shared(cast_fp, ov::op::v0::Constant::create(element::f32, {3}, {-2, 1, -2}));
+    const auto div = make_shared(mul, ov::op::v0::Constant::create(element::f32, {3}, {-10, 41, -1}));
     // order after convert [1, 0, 2]
-    const auto cast_int = make_shared(div, element::i32);
+    const auto cast_int = make_shared(div, element::i32);
     // order after gather [2, 1, 0]
     const auto gather = make_shared(cast_int,
-                                    op::Constant::create(element::i32, {3}, {2, 0, 1}),
-                                    op::Constant::create(element::i32, {}, {0}));
+                                    ov::op::v0::Constant::create(element::i32, {3}, {2, 0, 1}),
+                                    ov::op::v0::Constant::create(element::i32, {}, {0}));
     const auto r = make_shared(arg, gather);
@@ -307,9 +319,10 @@ TEST(type_prop, transpose_order_as_parameter_shape_after_transformation) {
  * One dimension is dynamic, transposed output shape cannot be deduced and will be dynamic.
  */
 TEST(type_prop, transpose_when_order_is_shape_of_dynamic_partial_shape) {
-    const auto arg = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
+    const auto arg =
+        make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
-    const auto param = make_shared(element::i64, PartialShape{0, 2, Dimension(1, 2)});
+    const auto param = make_shared(element::i64, PartialShape{0, 2, Dimension(1, 2)});
     const auto shape_of = make_shared(param);
     const auto r = make_shared(arg, shape_of);
@@ -373,15 +386,15 @@ INSTANTIATE_TEST_SUITE_P(
     PrintToStringParamName());
 TEST_P(TransposeTest, use_default_ctor) {
-    const auto input = make_shared(exp_type, input_p_shape);
-    const auto order = op::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
+    const auto input = make_shared(exp_type, input_p_shape);
+    const auto order = ov::op::v0::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
-    const auto output = make_shared();
+    const auto output = make_shared();
     output->set_arguments(NodeVector{input, order});
     output->validate_and_infer_types();
-    EXPECT_EQ(output->get_output_element_type(op::Transpose::ARG_T), exp_type);
-    EXPECT_EQ(output->get_output_partial_shape(op::Transpose::ARG_T), exp_p_shape);
+    EXPECT_EQ(output->get_output_element_type(op::v1::Transpose::ARG_T), exp_type);
+    EXPECT_EQ(output->get_output_partial_shape(op::v1::Transpose::ARG_T), exp_p_shape);
 }
 /**
@@ -390,13 +403,13 @@ TEST_P(TransposeTest, use_default_ctor) {
  * The interval dimensions should be moved accordingly to transpose order.
  */
 TEST_P(TransposeTest, propagate_interval_shape) {
-    const auto input = make_shared(exp_type, input_p_shape);
-    const auto order = op::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
+    const auto input = make_shared(exp_type, input_p_shape);
+    const auto order = ov::op::v0::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
-    const auto output = make_shared(input, order);
+    const auto output = make_shared(input, order);
-    EXPECT_EQ(output->get_output_element_type(op::Transpose::ARG_T), exp_type);
-    EXPECT_EQ(output->get_output_partial_shape(op::Transpose::ARG_T), exp_p_shape);
+    EXPECT_EQ(output->get_output_element_type(op::v1::Transpose::ARG_T), exp_type);
+    EXPECT_EQ(output->get_output_partial_shape(op::v1::Transpose::ARG_T), exp_p_shape);
 }
 /**
@@ -412,9 +425,9 @@ TEST_P(TransposeTest, propagate_labels) {
     set_shape_labels(input_p_shape, labels);
-    const auto input = make_shared(exp_type, input_p_shape);
-    const auto order = op::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
-    const auto output = make_shared(input, order);
+    const auto input = make_shared(exp_type, input_p_shape);
+    const auto order = ov::op::v0::Constant::create(element::i64, Shape{transpose_order.size()}, transpose_order);
+    const auto output = make_shared(input, order);
-    EXPECT_EQ(get_shape_labels(output->get_output_partial_shape(op::Transpose::ARG_T)), exp_labels);
+    EXPECT_EQ(get_shape_labels(output->get_output_partial_shape(op::v1::Transpose::ARG_T)), exp_labels);
 }
diff --git a/src/core/tests/type_prop/unary_elementwise.cpp b/src/core/tests/type_prop/unary_elementwise.cpp
index a9b6ecb12334c3..f31c50e42890ad 100644
--- a/src/core/tests/type_prop/unary_elementwise.cpp
+++ b/src/core/tests/type_prop/unary_elementwise.cpp
@@ -3,16 +3,15 @@
 //
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/op/negative.hpp"
 using namespace std;
-using namespace ngraph;
+using namespace ov;
 TEST(type_prop, unary_arithmetic_bad_argument_element_types) {
-    auto tv0_2_4_param = make_shared(element::boolean, Shape{2, 4});
+    auto tv0_2_4_param = make_shared(element::boolean, Shape{2, 4});
     try {
-        auto bc = make_shared(tv0_2_4_param);
+        auto bc = make_shared(tv0_2_4_param);
         // Should have thrown, so fail if it didn't
         FAIL() << "Did not detect incorrect element types for arithmetic operator";
     } catch (const NodeValidationFailure& error) {
diff --git a/src/core/tests/type_prop/unary_ops.hpp b/src/core/tests/type_prop/unary_ops.hpp
index 9dc5a449f1ac98..bf7cce80afc0f4 100644
--- a/src/core/tests/type_prop/unary_ops.hpp
+++ b/src/core/tests/type_prop/unary_ops.hpp
@@ -2,12 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include
+
 #include
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/op/parameter.hpp"
-using namespace ngraph;
+using namespace ov;
 template class UnaryOperator : public testing::Test {};
@@ -15,60 +16,60 @@ class UnaryOperator : public testing::Test {};
 TYPED_TEST_SUITE_P(UnaryOperator);
 TYPED_TEST_P(UnaryOperator, shape_inference_Shape1) {
-    auto param = std::make_shared(element::f32, Shape{2, 2});
+    auto param = std::make_shared(element::f32, Shape{2, 2});
     auto op = std::make_shared(param);
     ASSERT_EQ(op->get_shape(), (Shape{2, 2}));
 }
 TYPED_TEST_P(UnaryOperator, shape_inference_Shape2) {
-    auto param = std::make_shared(element::i32, Shape{21, 15, 2});
+    auto param = std::make_shared(element::i32, Shape{21, 15, 2});
     auto op = std::make_shared(param);
     ASSERT_EQ(op->get_shape(), (Shape{21, 15, 2}));
 }
 TYPED_TEST_P(UnaryOperator, input_type_inference_F32) {
-    auto param = std::make_shared(element::f32, Shape{10, 2, 2});
+    auto param = std::make_shared(element::f32, Shape{10, 2, 2});
     auto op = std::make_shared(param);
     ASSERT_EQ(op->get_element_type(), element::f32);
 }
 TYPED_TEST_P(UnaryOperator, input_type_inference_I64) {
-    auto param = std::make_shared(element::i64, Shape{41, 28, 2});
+    auto param = std::make_shared(element::i64, Shape{41, 28, 2});
     auto op = std::make_shared(param);
     ASSERT_EQ(op->get_element_type(), element::i64);
 }
 TYPED_TEST_P(UnaryOperator, input_type_inference_U16) {
-    auto param = std::make_shared(element::u16, Shape{100, 200, 7});
+    auto param = std::make_shared(element::u16, Shape{100, 200, 7});
     auto op = std::make_shared(param);
     ASSERT_EQ(op->get_element_type(), element::u16);
 }
 TYPED_TEST_P(UnaryOperator, incompatible_input_type_Shape1) {
-    const auto param = std::make_shared(element::boolean, Shape{100, 2, 50});
-    ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure);
+    const auto param = std::make_shared(element::boolean, Shape{100, 2, 50});
+    ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(UnaryOperator, incompatible_input_type_Shape2) {
-    const auto param = std::make_shared(element::boolean, Shape{40, 17, 50});
-    ASSERT_THROW(const auto unused = std::make_shared(param), ngraph::NodeValidationFailure);
+    const auto param = std::make_shared(element::boolean, Shape{40, 17, 50});
+    ASSERT_THROW(const auto unused = std::make_shared(param), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(UnaryOperator, dynamic_rank_input_shape_2D) {
     const PartialShape param_shape{Dimension::dynamic(), 10};
-    const auto param = std::make_shared(element::f32, param_shape);
+    const auto param = std::make_shared(element::f32, param_shape);
     const auto op = std::make_shared(param);
     ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10}));
 }
 TYPED_TEST_P(UnaryOperator, dynamic_rank_input_shape_3D) {
     const PartialShape param_shape{100, Dimension::dynamic(), 58};
-    const auto param = std::make_shared(element::f32, param_shape);
+    const auto param = std::make_shared(element::f32, param_shape);
     const auto op = std::make_shared(param);
     ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58}));
 }
 TYPED_TEST_P(UnaryOperator, dynamic_rank_input_shape_full) {
-    const auto param = std::make_shared(element::f64, PartialShape::dynamic());
+    const auto param = std::make_shared(element::f64, PartialShape::dynamic());
     const auto op = std::make_shared(param);
     ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 }
diff --git a/src/core/tests/type_prop/unsqueeze.cpp b/src/core/tests/type_prop/unsqueeze.cpp
index 5cf9ccdca7e007..ac5b826ab6792b 100644
--- a/src/core/tests/type_prop/unsqueeze.cpp
+++ b/src/core/tests/type_prop/unsqueeze.cpp
@@ -2,19 +2,24 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/unsqueeze.hpp"
+
 #include "common_test_utils/type_prop.hpp"
 #include "gmock/gmock.h"
-#include "ngraph/ngraph.hpp"
 #include "openvino/core/dimension_tracker.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/gather.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "sequnce_generator.hpp"
 using namespace std;
-using namespace ngraph;
+using namespace ov;
 using namespace testing;
 TEST(type_prop, unsqueeze) {
-    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
-    auto axes_node = make_shared(element::u64, Shape{2}, vector{1, 2});
+    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared(element::u64, Shape{2}, vector{1, 2});
     auto unsqueeze = make_shared(param, axes_node);
     EXPECT_EQ(unsqueeze->get_element_type(), element::f32);
@@ -22,8 +27,8 @@ TEST(type_prop, unsqueeze) {
 }
 TEST(type_prop, unsqueeze_incorrect_axes_shape) {
-    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
-    auto axes_node = make_shared(element::u64, Shape{1, 1, 1}, 1);
+    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared(element::u64, Shape{1, 1, 1}, 1);
     try {
         auto unsqueeze = make_shared(param, axes_node);
@@ -37,8 +42,8 @@ TEST(type_prop, unsqueeze_incorrect_axes_shape) {
 TEST(type_prop, unsqueeze_positive_axis_gt_ouput_rank) {
     constexpr int64_t bad_axis = 6;
-    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
-    auto axes_node = make_shared(element::u64, Shape{1}, bad_axis);
+    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared(element::u64, Shape{1}, bad_axis);
     try {
         auto unsqueeze = make_shared(param, axes_node);
@@ -53,8 +58,8 @@ TEST(type_prop, unsqueeze_positive_axis_gt_ouput_rank) {
 TEST(type_prop, unsqueeze_negative_axis_gt_ouput_rank) {
     constexpr int64_t bad_axis = -7;
-    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
-    auto axes_node = make_shared(element::u64, Shape{1}, bad_axis);
+    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared(element::u64, Shape{1}, bad_axis);
     try {
         auto unsqueeze = make_shared(param, axes_node);
@@ -68,8 +73,8 @@ TEST(type_prop, unsqueeze_negative_axis_gt_ouput_rank) {
 }
 TEST(type_prop, unsqueeze_empty_axes) {
-    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
-    auto axes_node = make_shared(element::u64, Shape{0}, vector{});
+    auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared(element::u64, Shape{0}, vector{});
     try {
         auto unsqueeze = make_shared(param, axes_node);
         FAIL() << "Unsqueeze axes empty not detected";
@@ -192,7 +197,7 @@ INSTANTIATE_TEST_SUITE_P(
     PrintToStringParamName());
 TEST_P(UnsqueezeTest, dimension_propagation_const_axis_i8) {
-    const auto axes_node = std::make_shared(element::i8, Shape{axes.size()}, axes);
+    const auto axes_node = std::make_shared(element::i8, Shape{axes.size()}, axes);
     const auto unsqueeze = std::make_shared(param, axes_node);
     EXPECT_EQ(unsqueeze->get_element_type(), element::f32);
@@ -200,7 +205,7 @@ TEST_P(UnsqueezeTest, dimension_propagation_const_axis_i8) {
 }
 TEST_P(UnsqueezeTest, dimension_propagation_const_axis_i32) {
-    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
+    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
     const auto unsqueeze = std::make_shared(param, axes_node);
     EXPECT_EQ(unsqueeze->get_element_type(), element::f32);
@@ -208,7 +213,7 @@ TEST_P(UnsqueezeTest, dimension_propagation_const_axis_i32) {
 }
 TEST_P(UnsqueezeTest, dimension_propagation_dynamic_axis_shape) {
-    const auto axes_node = std::make_shared(element::i64, PartialShape::dynamic());
+    const auto axes_node = std::make_shared(element::i64, PartialShape::dynamic());
     const auto unsqueeze = std::make_shared(param, axes_node);
     EXPECT_EQ(unsqueeze->get_element_type(), element::f32);
@@ -216,7 +221,7 @@ TEST_P(UnsqueezeTest, dimension_propagation_dynamic_axis_shape) {
 }
 TEST_P(UnsqueezeTest, dimension_propagation_static_rank_dynamic_dim_axis_shape) {
-    const auto axes_node = std::make_shared(element::u32, PartialShape{Dimension(2, 6)});
+    const auto axes_node = std::make_shared(element::u32, PartialShape{Dimension(2, 6)});
     const auto unsqueeze = std::make_shared(param, axes_node);
     EXPECT_EQ(unsqueeze->get_element_type(), element::f32);
@@ -224,10 +229,10 @@ TEST_P(UnsqueezeTest, dimension_propagation_static_rank_dynamic_dim_axis_shape)
 }
 TEST_P(UnsqueezeTest, use_default_ctor) {
-    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
+    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
-    const auto unsqueeze = make_shared();
-    unsqueeze->set_arguments(NodeVector{param, axes_node});
+    const auto unsqueeze = make_shared();
+    unsqueeze->set_arguments(ov::NodeVector{param, axes_node});
     unsqueeze->validate_and_infer_types();
     EXPECT_EQ(unsqueeze->get_output_element_type(0), element::f32);
@@ -242,9 +247,9 @@ TEST_P(UnsqueezeTest, labels_propagation) {
     std::tie(in_labels, exp_labels) = make_in_exp_labels();
     set_shape_labels(p_shape, in_labels);
-    param = make_shared(element::f32, p_shape);
+    param = make_shared(element::f32, p_shape);
-    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
+    const auto axes_node = std::make_shared(element::i32, Shape{axes.size()}, axes);
     const auto unsqueeze = std::make_shared(param, axes_node);
     EXPECT_EQ(get_shape_labels(unsqueeze->get_output_partial_shape(0)), exp_labels);
@@ -277,8 +282,8 @@ TEST_P(UnsqueezeBoundTest, propagate_label_and_dynamic_value) {
     set_shape_labels(labeled_shape, in_labels);
     constexpr auto et = element::i64;
-    const auto labeled_param = std::make_shared(et, labeled_shape);
-    const auto labeled_shape_of = std::make_shared(labeled_param);
+    const auto labeled_param = std::make_shared(et, labeled_shape);
+    const auto labeled_shape_of = std::make_shared(labeled_param);
     const auto zero = std::vector{0};
     const auto axis = std::make_shared(et, Shape{}, zero);
diff --git a/src/core/tests/type_prop/variadic_split.cpp b/src/core/tests/type_prop/variadic_split.cpp
index 34426aee432ad9..6a3ebd8e5ec73e 100644
--- a/src/core/tests/type_prop/variadic_split.cpp
+++ b/src/core/tests/type_prop/variadic_split.cpp
@@ -2,15 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/variadic_split.hpp"
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gmock/gmock.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/core/deprecated.hpp"
 #include "openvino/core/dimension_tracker.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "sequnce_generator.hpp"
 using namespace std;
-using namespace ngraph;
+using namespace ov;
 using namespace testing;
 using VSplitTypePropTestParam = std::tuple {
         auto exp_labels = in_labels;
         OPENVINO_SUPPRESS_DEPRECATED_START
-        const auto n_axis = normalize_axis("", axis, p_shape.rank());
+        const auto n_axis = ov::normalize_axis("", axis, p_shape.rank());
         OPENVINO_SUPPRESS_DEPRECATED_END
         exp_labels[n_axis] = ov::no_label;
@@ -92,9 +96,10 @@ INSTANTIATE_TEST_SUITE_P(
 TEST_P(VariadicSplitTest, dimension_propagation_axis_scalar) {
     constexpr auto dtype = element::i32;
-    const auto data = make_shared(dtype, p_shape);
-    const auto axis_node = make_shared(element::i16, Shape{}, axis);
-    const auto lengths_node = std::make_shared(element::i16, Shape{split_lengths.size()}, split_lengths);
+    const auto data = make_shared(dtype, p_shape);
+    const auto axis_node = make_shared(element::i16, Shape{}, axis);
+    const auto lengths_node =
+        std::make_shared(element::i16, Shape{split_lengths.size()}, split_lengths);
     const auto var_split = make_shared(data, axis_node, lengths_node);
@@ -105,9 +110,10 @@ TEST_P(VariadicSplitTest, dimension_propagation_axis_scalar) {
 TEST_P(VariadicSplitTest, dimension_propagation_axis_1d) {
     constexpr auto dtype = element::u64;
-    const auto data = make_shared(dtype, p_shape);
-    const auto axis_node = make_shared(element::i32, Shape{1}, axis);
-    const auto lengths_node = std::make_shared(element::i32, Shape{split_lengths.size()}, split_lengths);
+    const auto data = make_shared(dtype, p_shape);
+    const auto axis_node = make_shared(element::i32, Shape{1}, axis);
+    const auto lengths_node =
+        std::make_shared(element::i32, Shape{split_lengths.size()}, split_lengths);
     const auto var_split = make_shared(data, axis_node, lengths_node);
@@ -118,9 +124,10 @@ TEST_P(VariadicSplitTest, dimension_propagation_axis_1d) {
 TEST_P(VariadicSplitTest, use_default_ctor) {
     constexpr auto dtype = element::f32;
-    const auto param = make_shared(dtype, p_shape);
-    const auto axis_node = make_shared(element::i64, Shape{}, axis);
-    const auto lengths_node = std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths);
+    const auto param = make_shared(dtype, p_shape);
+    const auto axis_node = make_shared(element::i64, Shape{}, axis);
+    const auto lengths_node =
+        std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths);
     const auto var_split = make_shared();
     var_split->set_arguments(NodeVector{param, axis_node, lengths_node});
@@ -136,9 +143,10 @@ TEST_P(VariadicSplitTest, label_propagation) {
     std::tie(in_labels, exp_labels) = make_in_exp_labels();
     set_shape_labels(p_shape, in_labels);
-    const auto data = make_shared(element::f32, p_shape);
-    const auto axis_node = make_shared(element::i64, Shape{}, axis);
-    const auto lengths_node = std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths);
+    const auto data = make_shared(element::f32, p_shape);
+    const auto axis_node = make_shared(element::i64, Shape{}, axis);
+    const auto lengths_node =
+        std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths);
     const auto var_split = make_shared(data, axis_node, lengths_node);
     EXPECT_EQ(var_split->get_output_size(), split_lengths.size());
@@ -206,12 +214,12 @@ TEST_P(VariadicSplitBoundTest, propagate_label_and_dynamic_value) {
     set_shape_labels(p_shape, in_labels);
     constexpr auto et = element::i64;
-    const auto labeled_param = std::make_shared(et, p_shape);
-    const auto labeled_shape_of = std::make_shared(labeled_param);
+    const auto labeled_param = std::make_shared(et, p_shape);
+    const auto labeled_shape_of = std::make_shared(labeled_param);
     const auto zero = std::vector{0};
     const auto axis_node = std::make_shared(et, Shape{}, zero);
-    const auto lengths_node = std::make_shared(et, Shape{split_lengths.size()}, split_lengths);
+    const auto lengths_node = std::make_shared(et, Shape{split_lengths.size()}, split_lengths);
     const auto var_split = std::make_shared(labeled_shape_of, axis_node, lengths_node);
     for (auto& output : var_split->outputs()) {