Fix docs code snippets (#25864)
### Details:
 - *item1*
 - *...*

### Tickets:
 - *ticket-id*
olpipi authored Aug 6, 2024
1 parent f19282f commit 5ec4375
Showing 17 changed files with 75 additions and 96 deletions.
9 changes: 8 additions & 1 deletion .github/workflows/code_snippets.yml
@@ -46,4 +46,11 @@ jobs:
run: cmake -DCMAKE_BUILD_TYPE=Release -DTHREADING=SEQ -B build

- name: Build snippets
run: cmake --build build --target openvino_docs_snippets --parallel
if: ${{ runner.os == 'Linux' || runner.os == 'macOS'}}
run: cmake --build build --target openvino_docs_snippets --parallel $(nproc)

- name: Build snippets Windows
if: ${{ runner.os == 'Windows'}}
shell: pwsh
run: cmake --build build --target openvino_docs_snippets --parallel $ENV:NUMBER_OF_PROCESSORS

2 changes: 1 addition & 1 deletion docs/articles_en/assets/snippets/multi_threading.cpp
@@ -18,7 +18,7 @@ int main() {
auto compiled_model_1 = core.compile_model(model, device, ov::inference_num_threads(1));

// Use logical processors of Efficient-cores for inference on hybrid platform
auto compiled_model_2 = core.compile_model(model, device, ov::hint::scheduling_core_type(ECORE_ONLY));
auto compiled_model_2 = core.compile_model(model, device, ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY));

// Use one logical processor per CPU core for inference when hyper threading is on
auto compiled_model_3 = core.compile_model(model, device, ov::hint::enable_hyper_threading(false));
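For context, a minimal self-contained sketch of the corrected call (assuming a model available as `model.xml` and the CPU device; not taken verbatim from the commit):

```cpp
#include "openvino/openvino.hpp"

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical model path

    // The enum value must be fully qualified, as in the fixed snippet above.
    auto compiled_model = core.compile_model(
        model,
        "CPU",
        ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY));
    return 0;
}
```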
@@ -42,7 +42,7 @@ int main() {

{
//! [wrap_dmabuf_fd]
int32_t fd_heap; // create the DMA-BUF System Heap file descriptor
int32_t fd_heap = 0; // create the DMA-BUF System Heap file descriptor
auto remote_tensor = npu_context.create_tensor(in_element_type, in_shape, fd_heap);
//! [wrap_dmabuf_fd]
}
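The `fd_heap = 0` initializer only keeps the snippet compilable; on a real system the descriptor would come from the Linux DMA-BUF system heap. A hedged sketch of that allocation (Linux-only, assuming `/dev/dma_heap/system` exists; the helper name is illustrative, not part of this commit):

```cpp
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

// Allocate `size` bytes from the DMA-BUF system heap and return the DMA-BUF fd,
// or -1 on failure. The returned fd is what the wrap_dmabuf_fd snippet expects.
static int32_t allocate_dmabuf_fd(size_t size) {
    int heap = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
    if (heap < 0)
        return -1;
    struct dma_heap_allocation_data alloc = {};
    alloc.len = size;
    alloc.fd_flags = O_RDWR | O_CLOEXEC;
    if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
        close(heap);
        return -1;
    }
    close(heap);
    return static_cast<int32_t>(alloc.fd);
}
```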
36 changes: 18 additions & 18 deletions docs/articles_en/assets/snippets/ov_dynamic_shapes.c
@@ -61,22 +61,22 @@ ov_model_t* model = NULL;
ov_core_read_model(core, "model.xml", NULL, &model);

//! [ov_dynamic_shapes:print_dynamic]
ov_output_port_t* output_port = NULL;
ov_output_port_t* input_port = NULL;
ov_output_const_port_t* output_port = NULL;
ov_output_const_port_t* input_port = NULL;
ov_partial_shape_t partial_shape;
char * str_partial_shape = NULL;
const char * str_partial_shape = NULL;

// Print output partial shape
{
ov_model_output(model, &output_port);
ov_model_const_output(model, &output_port);
ov_port_get_partial_shape(output_port, &partial_shape);
str_partial_shape = ov_partial_shape_to_string(partial_shape);
printf("The output partial shape: %s", str_partial_shape);
}

// Print input partial shape
{
ov_model_input(model, &input_port);
ov_model_const_input(model, &input_port);
ov_port_get_partial_shape(input_port, &partial_shape);
str_partial_shape = ov_partial_shape_to_string(partial_shape);
printf("The input partial shape: %s", str_partial_shape);
@@ -85,8 +85,8 @@ printf("The input partial shape: %s", str_partial_shape);
// free allocated resource
ov_free(str_partial_shape);
ov_partial_shape_free(&partial_shape);
ov_output_port_free(output_port);
ov_output_port_free(input_port);
ov_output_const_port_free(output_port);
ov_output_const_port_free(input_port);
//! [ov_dynamic_shapes:print_dynamic]
ov_model_free(model);
ov_core_free(core);
@@ -98,15 +98,15 @@ ov_core_create(&core);

//! [ov_dynamic_shapes:detect_dynamic]
ov_model_t* model = NULL;
ov_output_port_t* input_port = NULL;
ov_output_port_t* output_port = NULL;
ov_output_const_port_t* input_port = NULL;
ov_output_const_port_t* output_port = NULL;
ov_partial_shape_t partial_shape;

ov_core_read_model(core, "model.xml", NULL, &model);

// for input
{
ov_model_input_by_index(model, 0, &input_port);
ov_model_const_input_by_index(model, 0, &input_port);
ov_port_get_partial_shape(input_port, &partial_shape);
if (ov_partial_shape_is_dynamic(partial_shape)) {
// input is dynamic
@@ -115,7 +115,7 @@ if (ov_partial_shape_is_dynamic(partial_shape)) {

// for output
{
ov_model_output_by_index(model, 0, &output_port);
ov_model_const_output_by_index(model, 0, &output_port);
ov_port_get_partial_shape(output_port, &partial_shape);
if (ov_partial_shape_is_dynamic(partial_shape)) {
// output is dynamic
@@ -124,8 +124,8 @@ if (ov_partial_shape_is_dynamic(partial_shape)) {

// free allocated resource
ov_partial_shape_free(&partial_shape);
ov_output_port_free(input_port);
ov_output_port_free(output_port);
ov_output_const_port_free(input_port);
ov_output_const_port_free(output_port);
//! [ov_dynamic_shapes:detect_dynamic]
ov_model_free(model);
ov_core_free(core);
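For comparison, the equivalent dynamic-shape check through the C++ API needs no explicit port objects or manual frees. A short sketch, assuming the same model.xml:

```cpp
#include "openvino/openvino.hpp"

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");

    // Partial shapes are queried directly from the model's ports.
    if (model->input(0).get_partial_shape().is_dynamic()) {
        // input is dynamic
    }
    if (model->output(0).get_partial_shape().is_dynamic()) {
        // output is dynamic
    }
    return 0;
}
```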
@@ -147,8 +147,8 @@ ov_infer_request_t* infer_request = NULL;
ov_compiled_model_create_infer_request(compiled_model, &infer_request);

//! [ov_dynamic_shapes:set_input_tensor]
ov_output_port_t* input_port = NULL;
ov_element_type_e* type = NULL;
ov_output_const_port_t* input_port = NULL;
ov_element_type_e type = UNDEFINED;
ov_shape_t input_shape_1;
ov_tensor_t* input_tensor_1 = NULL;
ov_tensor_t* output_tensor = NULL;
@@ -163,8 +163,8 @@ void* data_2 = NULL;
// Create tensor compatible with the model input
// Shape {1, 128} is compatible with any reshape statements made in previous examples
{
ov_model_input(model, &input_port);
ov_port_get_element_type(input_port, type);
ov_model_const_input(model, &input_port);
ov_port_get_element_type(input_port, &type);
int64_t dims[2] = {1, 128};
ov_shape_create(2, dims, &input_shape_1);
ov_tensor_create(type, input_shape_1, &input_tensor_1);
@@ -214,7 +214,7 @@ ov_tensor_get_shape(output_tensor, &output_shape_2);
// ... read values in data_2 according to the shape output_shape_2

// free resource
ov_output_port_free(input_port);
ov_output_const_port_free(input_port);
ov_shape_free(&input_shape_1);
ov_tensor_free(input_tensor_1);
ov_shape_free(&output_shape_1);
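The C++ flow for feeding a dynamic input is considerably shorter than the C version above. A sketch, assuming an infer request created from the compiled model and a first input that accepts a {1, 128} f32 tensor (both are assumptions, not taken from the commit):

```cpp
#include "openvino/openvino.hpp"

void run_dynamic_input(ov::InferRequest& infer_request) {
    // Create a tensor whose shape is compatible with the dynamic model input.
    ov::Tensor input_tensor(ov::element::f32, ov::Shape{1, 128});
    // ... fill input_tensor.data<float>() with real values

    infer_request.set_input_tensor(input_tensor);
    infer_request.infer();

    // The output tensor's shape reflects the shape computed for this input.
    auto output_tensor = infer_request.get_output_tensor();
    auto output_shape = output_tensor.get_shape();
    // ... read output_tensor.data<float>() according to output_shape
}
```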
110 changes: 40 additions & 70 deletions docs/articles_en/assets/snippets/ov_patterns.cpp
@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0

// ! [ov:imports]
#include <gtest/gtest.h>

#include "common_test_utils/matcher.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/matmul.hpp"
@@ -22,7 +19,7 @@ using namespace std;
// ! [ov:imports]

// ! [ov:create_simple_model_and_pattern]
TEST(pattern, simple_model_and_pattern) {
void create_simple_model_and_pattern() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
@@ -39,17 +36,13 @@ TEST(pattern, simple_model_and_pattern) {
auto pattern_abs = std::make_shared<ov::op::v0::Abs>(pattern_mul->output(0));
auto pattern_relu = std::make_shared<ov::op::v0::Relu>(pattern_abs->output(0));

// Create a matcher and try to match the nodes
TestMatcher tm;

// Should perfectly match
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
// pattern_relu should perfectly match model_relu
}
// ! [ov:create_simple_model_and_pattern]


// ! [ov:create_simple_model_and_pattern_wrap_type]
TEST(pattern, simple_model_and_pattern_wrap_type) {
void create_simple_model_and_pattern_wrap_type() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
@@ -66,17 +59,13 @@ TEST(pattern, simple_model_and_pattern_wrap_type) {
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});

// Create a matcher and try to match the nodes
TestMatcher tm;

// Should perfectly match
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
// pattern_relu should perfectly match model_relu
}
// ! [ov:create_simple_model_and_pattern_wrap_type]


// ! [ov:wrap_type_list]
TEST(pattern, wrap_type_list) {
void wrap_type_list() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
@@ -95,45 +84,42 @@ TEST(pattern, wrap_type_list) {
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu, ov::op::v0::Sigmoid>({pattern_abs->output(0)});

// Create a matcher and try to match the nodes
TestMatcher tm;

// The same pattern perfectly matches 2 different nodes
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
ASSERT_TRUE(tm.match(pattern_relu, model_sig));
// pattern_relu should perfectly match model_relu and model_sig
}
// ! [ov:wrap_type_list]

void patterns_misc() {
// ! [ov:any_input]
auto pattern_mul = ov::pass::pattern::wrap_type<ov::op::v0::MatMul>({pattern::any_input(), pattern::any_input()});
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});
// ! [ov:any_input]

// ! [ov:wrap_type_predicate]
ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern::any_input()}, pattern::consumers_count(2));
// ! [ov:wrap_type_predicate]


// ! [ov:any_input_predicate]
auto pattern_mul = ov::pass::pattern::wrap_type<ov::op::v0::MatMul>({pattern::any_input([](const Output<Node>& value){
return value.get_shape().size() == 4;}),
pattern::any_input([](const Output<Node>& value){
return value.get_shape().size() == 4;})});
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});
// ! [ov:any_input_predicate]


// ! [ov:optional_predicate]
auto pattern_sig_opt = ov::pass::pattern::optional<ov::op::v0::Sigmoid>(pattern_relu, pattern::consumers_count(2));
// ! [ov:optional_predicate]
{
// ! [ov:any_input]
auto pattern_mul = ov::pass::pattern::wrap_type<ov::op::v0::MatMul>({pattern::any_input(), pattern::any_input()});
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});
// ! [ov:any_input]

// ! [ov:wrap_type_predicate]
ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern::any_input()}, pattern::consumers_count(2));
// ! [ov:wrap_type_predicate]
}
{
// ! [ov:any_input_predicate]
auto pattern_mul = ov::pass::pattern::wrap_type<ov::op::v0::MatMul>({pattern::any_input([](const Output<Node>& value){
return value.get_shape().size() == 4;}),
pattern::any_input([](const Output<Node>& value){
return value.get_shape().size() == 4;})});
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});
// ! [ov:any_input_predicate]


// ! [ov:optional_predicate]
auto pattern_sig_opt = ov::pass::pattern::optional<ov::op::v0::Sigmoid>(pattern_relu, pattern::consumers_count(2));
// ! [ov:optional_predicate]
}
}


// ! [ov:pattern_or]
TEST(pattern, pattern_or) {
void pattern_or() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
Expand All @@ -158,17 +144,13 @@ TEST(pattern, pattern_or) {
// Create Or node
auto pattern_or = std::make_shared<ov::pass::pattern::op::Or>(OutputVector{red_pattern_sigmoid->output(0), blue_pattern_relu->output(0)});

// Create a matcher and try to match the nodes
TestMatcher tm;

// The same pattern perfectly matches 2 different nodes
ASSERT_TRUE(tm.match(pattern_or, model_relu));
// pattern_or should perfectly match model_relu
}
// ! [ov:pattern_or]


// ! [ov:pattern_optional_middle]
TEST(pattern, pattern_optional_middle) {
void pattern_optional_middle() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
Expand All @@ -186,17 +168,13 @@ TEST(pattern, pattern_optional_middle) {
auto pattern_sig_opt = ov::pass::pattern::optional<ov::op::v0::Sigmoid>({pattern_abs->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_sig_opt->output(0)});

// Create a matcher and try to match the nodes
TestMatcher tm;

// Should perfectly match
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
// pattern_relu should perfectly match model_relu
}
// ! [ov:pattern_optional_middle]


// ! [ov:pattern_optional_top]
TEST(pattern, pattern_optional_top) {
void pattern_optional_top() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
Expand All @@ -214,17 +192,13 @@ TEST(pattern, pattern_optional_top) {
auto pattern_abs = ov::pass::pattern::wrap_type<ov::op::v0::Abs>({pattern_mul->output(0)});
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});

// Create a matcher and try to match the nodes
TestMatcher tm;

// Should perfectly match
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
// pattern_relu should perfectly match model_relu
}
// ! [ov:pattern_optional_top]


// ! [ov:pattern_optional_root]
TEST(pattern, pattern_optional_root) {
void pattern_optional_root() {
// Create a sample model
PartialShape shape{2, 2};
auto model_param1 = std::make_shared<ov::op::v0::Parameter>(element::i32, shape);
Expand All @@ -242,10 +216,6 @@ TEST(pattern, pattern_optional_root) {
auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({pattern_abs->output(0)});
auto pattern_sig_opt = ov::pass::pattern::optional<ov::op::v0::Sigmoid>(pattern_relu);

// Create a matcher and try to match the nodes
TestMatcher tm;

// Should perfectly match
ASSERT_TRUE(tm.match(pattern_relu, model_relu));
// pattern_relu should perfectly match model_relu
}
// ! [ov:pattern_optional_root]
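Outside of unit tests, patterns like the ones above are usually consumed through a MatcherPass. A generic sketch (class name and callback body are illustrative, not part of this commit):

```cpp
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/op/relu.hpp"

class ReluMatcherPass : public ov::pass::MatcherPass {
public:
    ReluMatcherPass() {
        // Build the pattern, then register a callback fired on every match.
        auto pattern_relu = ov::pass::pattern::wrap_type<ov::op::v0::Relu>();
        auto matcher = std::make_shared<ov::pass::pattern::Matcher>(pattern_relu, "ReluMatcherPass");
        register_matcher(matcher, [](ov::pass::pattern::Matcher& m) {
            auto matched_relu = m.get_match_root();
            // ... inspect or rewrite the matched subgraph here
            return false;  // return true only if the graph was modified
        });
    }
};
```

Registering such a pass with ov::pass::Manager and calling run_passes(model) would then exercise the pattern on a real model.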
@@ -11,7 +11,7 @@ int main() {
ov::AnyMap config;
//! [ov:intel_cpu:sparse_weights_decompression:part0]
ov::Core core; // Step 1: create ov::Core object
core.set_property(ov::intel_cpu::sparse_weights_decompression_rate(0.8)); // Step 1b: Enable sparse weights decompression feature
core.set_property(ov::intel_cpu::sparse_weights_decompression_rate(0.8f)); // Step 1b: Enable sparse weights decompression feature
auto model = core.read_model(modelPath); // Step 2: Read Model
//... // Step 3: Prepare inputs/outputs
//... // Step 4: Set device configuration
10 changes: 6 additions & 4 deletions docs/snippets/CMakeLists.txt
@@ -18,12 +18,14 @@ endif()

file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.c")
file(GLOB GPU_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/gpu/*.cpp")
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.c"
"${CMAKE_CURRENT_SOURCE_DIR}/../articles_en/assets/snippets/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/../articles_en/assets/snippets/*.c")

file(GLOB GPU_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/../articles_en/assets/snippets/gpu/*.cpp")
# add GPU snippets if OpenCL has been found
if(TARGET OpenCL::OpenCL)
list(APPEND SOURCES ${GPU_SOURCES})
list(APPEND SOURCES ${GPU_SOURCES})
endif()

# try to find VA libraries
@@ -38,7 +40,7 @@ endif()
# remove OpenCV related sources
find_package(OpenCV QUIET COMPONENTS core imgcodecs)
if(NOT OpenCV_FOUND OR NOT OpenCV_VERSION VERSION_GREATER_EQUAL 3)
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ShapeInference.cpp")
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/../articles_en/assets/snippets/ShapeInference.cpp")
endif()

# requires mfxFrameSurface1 and MSS API
