Skip to content

Commit

Permalink
Refactor MemoryLayerTest (openvinotoolkit#20914)
Browse files Browse the repository at this point in the history
* Refactor MemoryLayerTest

* Apply comments
  • Loading branch information
olpipi authored Nov 8, 2023
1 parent ace986c commit 588e96b
Show file tree
Hide file tree
Showing 6 changed files with 283 additions and 15 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,24 @@

#include <vector>

#include "single_layer_tests/memory.h"

using namespace LayerTestsDefinitions;
#include "single_op_tests/memory.h"

namespace {
using ov::test::MemoryLayerTest;

std::vector<ngraph::helpers::MemoryTransformation> transformation {
ngraph::helpers::MemoryTransformation::NONE,
ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2,
ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2_REGULAR_API,
ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT,
std::vector<ov::test::utils::MemoryTransformation> transformation {
ov::test::utils::MemoryTransformation::NONE,
ov::test::utils::MemoryTransformation::LOW_LATENCY_V2,
ov::test::utils::MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT,
};

const std::vector<InferenceEngine::SizeVector> inShapes = {
const std::vector<ov::Shape> inShapes = {
{3},
{100, 100},
};

const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::FP32,
const std::vector<ov::element::Type> input_types = {
ov::element::f32,
};

const std::vector<int64_t> iterationCount {
Expand All @@ -32,14 +30,14 @@ const std::vector<int64_t> iterationCount {
10
};

INSTANTIATE_TEST_SUITE_P(smoke_MemoryTest, MemoryTest,
INSTANTIATE_TEST_SUITE_P(smoke_MemoryTest, MemoryLayerTest,
::testing::Combine(
::testing::ValuesIn(transformation),
::testing::ValuesIn(iterationCount),
::testing::ValuesIn(inShapes),
::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(input_types),
::testing::Values(ov::test::utils::DEVICE_CPU, "HETERO:CPU")),
MemoryTest::getTestCaseName);
MemoryLayerTest::getTestCaseName);

} // namespace

Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_op/memory.hpp"

namespace ov {
namespace test {
// Parameterized inference run for the Memory (ReadValue/Assign) single-op test.
// NOTE: dropped the stray ';' after the macro body — it is an extra semicolon
// at namespace scope and triggers -Wextra-semi on pedantic builds.
TEST_P(MemoryLayerTest, Inference) {
    run();
}

// Same scenario driven through MemoryV3LayerTest, which builds the model with
// the opset3 ReadValue/Assign ops. Stray trailing ';' removed (see above rule:
// extra semicolon at namespace scope).
TEST_P(MemoryV3LayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"

namespace ov {
namespace test {

// (transformation, iteration count, input shape, model element type, target device)
using MemoryLayerTestParams = std::tuple<
        ov::test::utils::MemoryTransformation,  // Apply Memory transformation
        int64_t,                                // iterationCount
        ov::Shape,                              // inputShape
        ov::element::Type,                      // modelType
        std::string                             // targetDevice
>;

// Functional single-op test for stateful ReadValue/Assign (Memory) operations.
// Depending on the transformation parameter it either builds a plain
// ReadValue -> Add -> Assign model directly, or builds a TensorIterator model
// and lowers it with the LowLatency2 pass; results are compared against the
// TEMPLATE reference plugin across repeated stateful inference iterations.
class MemoryLayerTest : public testing::WithParamInterface<MemoryLayerTestParams>,
                        virtual public ov::test::SubgraphBaseStaticTest {
public:
    // Builds a unique, human-readable name from the parameter tuple.
    static std::string getTestCaseName(const testing::TestParamInfo<MemoryLayerTestParams> &obj);

protected:
    void SetUp() override;
    // Runs iteration_count + 1 inferences on one request so state is carried over.
    void infer() override;
    // Reference run on the TEMPLATE plugin, repeated the same number of times.
    std::vector<ov::Tensor> calculate_refs() override;

    // Plain Parameter -> ReadValue -> Add -> {Assign, Result} model.
    void CreateCommonFunc(ov::element::Type model_type, ov::Shape input_shape);
    // TensorIterator model whose back-edge is later lowered by ApplyLowLatency().
    void CreateTIFunc(ov::element::Type model_type, ov::Shape input_shape);
    // Applies the requested LowLatency2 variant to `function`.
    void ApplyLowLatency(ov::test::utils::MemoryTransformation transformation);

    bool use_version_3 = false;  // true -> opset3 ReadValue/Assign, false -> opset6
    int64_t iteration_count;     // number of extra stateful iterations to run
};

// Variant that builds the model with the opset3 ReadValue/Assign operations.
class MemoryV3LayerTest : public MemoryLayerTest {
protected:
    void SetUp() override;  // sets use_version_3 before delegating to the base SetUp
};

} // namespace test
} // namespace ov
174 changes: 174 additions & 0 deletions src/tests/functional/shared_test_classes/src/single_op/memory.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_op/memory.hpp"

#include "openvino/pass/low_latency.hpp"
#include "openvino/pass/manager.hpp"
#include "template/properties.hpp"

#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/util/variable.hpp"
#include "openvino/op/tensor_iterator.hpp"

namespace ov {
namespace test {

// Builds a unique, human-readable test name from the parameter tuple:
// transformation / iteration count / input shape / element type / device.
std::string MemoryLayerTest::getTestCaseName(const testing::TestParamInfo<MemoryLayerTestParams> &obj) {
    int64_t iteration_count;
    ov::element::Type model_type;
    ov::Shape input_shape;
    std::string target_device;
    ov::test::utils::MemoryTransformation transformation;
    std::tie(transformation, iteration_count, input_shape, model_type, target_device) = obj.param;

    std::ostringstream result;
    result << "transformation=" << transformation << "_";
    result << "iteration_count=" << iteration_count << "_";
    result << "IS=" << ov::test::utils::vec2str(input_shape) << "_";
    result << "modelType=" << model_type.get_type_name() << "_";
    result << "trgDev=" << target_device;
    // Fixed: removed the stray `result << ")";` — it appended an unbalanced
    // closing parenthesis (no matching '(' is ever written) to every test name.
    return result.str();
}

void MemoryLayerTest::SetUp() {
ov::element::Type model_type;
ov::Shape input_shape;
ov::test::utils::MemoryTransformation transformation;

std::tie(transformation, iteration_count, input_shape, model_type, targetDevice) = this->GetParam();

if (transformation == ov::test::utils::MemoryTransformation::NONE) {
CreateCommonFunc(model_type, input_shape);
} else {
CreateTIFunc(model_type, input_shape);
ApplyLowLatency(transformation);
}
}

// Builds: Parameter -> ReadValue -> Add(ReadValue, Parameter) -> {Result, Assign}.
// The ReadValue/Assign opset (v3 vs v6) is selected by the use_version_3 member.
void MemoryLayerTest::CreateCommonFunc(ov::element::Type model_type, ov::Shape input_shape) {
    auto input = std::make_shared<ov::op::v0::Parameter>(model_type, input_shape);

    // GPU requires a fully specified variable; other devices accept dynamic info.
    ov::op::util::VariableInfo var_info{PartialShape::dynamic(), element::dynamic, "v0"};
    if (targetDevice == ov::test::utils::DEVICE_GPU) {
        var_info = ov::op::util::VariableInfo{input_shape, model_type, "v0"};
    }
    auto variable = std::make_shared<ov::op::util::Variable>(var_info);

    std::shared_ptr<ov::op::util::ReadValueBase> read_value;
    if (use_version_3) {
        read_value = std::make_shared<ov::op::v3::ReadValue>(input, variable->get_info().variable_id);
    } else {
        read_value = std::make_shared<ov::op::v6::ReadValue>(input, variable);
    }

    auto sum = std::make_shared<ov::op::v1::Add>(read_value, input);

    std::shared_ptr<ov::op::util::AssignBase> assign;
    if (use_version_3) {
        assign = std::make_shared<ov::op::v3::Assign>(sum, variable->get_info().variable_id);
    } else {
        assign = std::make_shared<ov::op::v6::Assign>(sum, variable);
    }

    auto result = std::make_shared<ov::op::v0::Result>(sum);
    function = std::make_shared<ov::Model>(ResultVector{result}, SinkVector{assign},
                                           ParameterVector{input}, "TestMemory");
}

// Builds a TensorIterator model whose back-edge (merged input) can later be
// converted into ReadValue/Assign memory ops by the LowLatency2 pass.
void MemoryLayerTest::CreateTIFunc(ov::element::Type model_type, ov::Shape input_shape) {
    auto param = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(input_shape));

    // Second input is sliced along axis 0, one slice per loop iteration.
    // Fixed: removed the dead local `std::vector<std::vector<size_t>> shape`
    // that was initialized but never used.
    auto iter_count = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape{static_cast<size_t>(iteration_count), 1});

    // Body: Add(X, Y), plus a pass-through output for the sliced counter.
    auto X = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(input_shape));
    auto Y = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(input_shape));
    auto Iter = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape{1, 1});
    auto add = std::make_shared<ov::op::v1::Add>(X, Y);
    auto res = std::make_shared<ov::op::v0::Result>(add);
    auto Iter_res = std::make_shared<ov::op::v0::Result>(Iter);
    auto body = std::make_shared<ov::Model>(OutputVector{res, Iter_res}, ParameterVector{X, Y, Iter});

    // TI construction
    auto tensor_iterator = std::make_shared<ov::op::v0::TensorIterator>();
    tensor_iterator->set_body(body);

    tensor_iterator->set_merged_input(X, param, res);  // back-edge: X <- res of previous iteration
    tensor_iterator->set_invariant_input(Y, param);
    tensor_iterator->set_sliced_input(Iter, iter_count, 0, 1, 1, -1, 0);

    auto output = tensor_iterator->get_iter_value(res, -1);
    auto output_iter = tensor_iterator->get_concatenated_slices(Iter_res, 0, 1, 1, -1, 0);
    function = std::make_shared<ov::Model>(OutputVector{output, output_iter},
                                           ParameterVector{param, iter_count},
                                           "PureTI");
}

// Lowers the TensorIterator back-edge to ReadValue/Assign memory ops with the
// LowLatency2 pass. For LOW_LATENCY_V2_ORIGINAL_INIT the pass is run with
// use_const_initializer = false so the original initializing subgraph is kept;
// for LOW_LATENCY_V2 the pass default (constant initializer) is used.
// The two branches previously duplicated the whole pass-manager boilerplate.
void MemoryLayerTest::ApplyLowLatency(ov::test::utils::MemoryTransformation transformation) {
    const bool original_init =
        transformation == ov::test::utils::MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT;
    if (transformation == ov::test::utils::MemoryTransformation::LOW_LATENCY_V2 || original_init) {
        function->validate_nodes_and_infer_types();
        ov::pass::Manager manager;
        // use_const_initializer defaults to true; pass false only for ORIGINAL_INIT.
        manager.register_pass<pass::LowLatency2>(!original_init);
        manager.run_passes(function);
    }
}

void MemoryLayerTest::infer() {
inferRequest = compiledModel.create_infer_request();
for (size_t iter = 0; iter <= iteration_count; iter++) {
for (const auto& input : inputs) {
inferRequest.set_tensor(input.first, input.second);
}
inferRequest.infer();
}
}

// Computes reference outputs by compiling the reference model on the TEMPLATE
// plugin with plugin transformations disabled, then mirroring infer():
// iteration_count + 1 stateful runs on a single request.
std::vector<ov::Tensor> MemoryLayerTest::calculate_refs() {
    if (is_report_stages) {
        std::cout << "[ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is started"<< std::endl;
    }
    auto start_time = std::chrono::system_clock::now();

    // Re-sync functionRefs with the tested model and map its parameters
    // onto the generated input tensors (fills matched_parameters).
    update_ref_model();
    match_parameters();

    // disable_transformations keeps the reference semantics untouched.
    auto compiledModelRef = core->compile_model(functionRefs, ov::test::utils::DEVICE_TEMPLATE, {{ ov::template_plugin::disable_transformations(true) }});
    auto inferRequestRef = compiledModelRef.create_infer_request();

    // Same iteration scheme as infer() so the memory state evolves identically.
    for (size_t iter = 0; iter <= iteration_count; iter++) {
        for (const auto& param : functionRefs->get_parameters()) {
            inferRequestRef.set_tensor(param->get_default_output(), inputs.at(matched_parameters[param]));
        }
        inferRequestRef.infer();
    }
    auto outputs = std::vector<ov::Tensor>{};
    for (const auto& output : functionRefs->outputs()) {
        outputs.push_back(inferRequestRef.get_tensor(output));
    }
    if (is_report_stages) {
        auto end_time = std::chrono::system_clock::now();
        std::chrono::duration<double> duration = end_time - start_time;
        std::cout << "[ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is " << duration.count() << "s" << std::endl;
    }
    return outputs;
}

// Same scenario as MemoryLayerTest, but CreateCommonFunc builds the model with
// the opset3 ReadValue/Assign ops (selected via use_version_3).
void MemoryV3LayerTest::SetUp() {
    use_version_3 = true;
    MemoryLayerTest::SetUp();
}

} // namespace test
} // namespace ov

Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,12 @@ enum class TensorIteratorBody {
// CNN todo: implement
};

// clang-format on
// Transformation applied to the memory test model before compilation.
enum class MemoryTransformation {
    NONE,                         // use the ReadValue/Assign model as built
    LOW_LATENCY_V2,               // apply the LowLatency2 pass
    LOW_LATENCY_V2_REGULAR_API,   // presumably LowLatency2 via the regular plugin API — TODO confirm against callers
    LOW_LATENCY_V2_ORIGINAL_INIT  // LowLatency2 keeping the original init subgraph (use_const_initializer = false)
};

std::ostream& operator<<(std::ostream& os, const ReductionType& m);

Expand Down Expand Up @@ -195,6 +200,8 @@ std::ostream& operator<<(std::ostream& os, ov::op::v8::MatrixNms::DecayFunction

std::ostream& operator<<(std::ostream& os, TensorIteratorBody type);

std::ostream& operator<<(std::ostream& os, MemoryTransformation type);

} // namespace utils
} // namespace test
} // namespace ov
20 changes: 20 additions & 0 deletions src/tests/test_utils/common_test_utils/src/test_enums.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,26 @@ std::ostream& operator<<(std::ostream& os, TensorIteratorBody type) {
return os;
}

// Streams the enumerator name; an unknown value is a programming error.
std::ostream& operator<<(std::ostream& os, MemoryTransformation type) {
    const char* name = nullptr;
    switch (type) {
    case MemoryTransformation::NONE:
        name = "NONE";
        break;
    case MemoryTransformation::LOW_LATENCY_V2:
        name = "LOW_LATENCY_V2";
        break;
    case MemoryTransformation::LOW_LATENCY_V2_REGULAR_API:
        name = "LOW_LATENCY_V2_REGULAR_API";
        break;
    case MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT:
        name = "LOW_LATENCY_V2_ORIGINAL_INIT";
        break;
    }
    if (name == nullptr)
        throw std::runtime_error("NOT_SUPPORTED_TYPE");
    return os << name;
}

} // namespace utils
} // namespace test
} // namespace ov

0 comments on commit 588e96b

Please sign in to comment.