Merge remote-tracking branch 'upstream/master' into tj/plugin/template/test/compare-tensor-floormod
t-jankowski committed Apr 10, 2024
2 parents d225c2b + ca48e0b commit c535095
Showing 23 changed files with 636 additions and 690 deletions.
1 change: 0 additions & 1 deletion .github/workflows/android_arm64.yml
@@ -87,7 +87,6 @@ jobs:
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/json
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/gtest
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/gflags
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/open_model_zoo
popd
- name: Clone vcpkg
15 changes: 10 additions & 5 deletions .github/workflows/job_debian_packages.yml
@@ -43,7 +43,7 @@ jobs:
- name: Install debian packages & check conflicts
run: |
apt-get update -y
if [[ "${{ runner.arch }}" == "X64" ]]; then
# Install debian packages from previous release
apt-get install --no-install-recommends -y gnupg wget ca-certificates
@@ -66,21 +66,26 @@
run: |
/usr/share/openvino/samples/cpp/build_samples.sh
/usr/share/openvino/samples/c/build_samples.sh
[[ "${{ runner.arch }}" == "X64" ]] && path_by_arch="intel64" || path_by_arch="aarch64"
~/openvino_cpp_samples_build/$path_by_arch/Release/hello_query_device
# check integrity of OpenVINO Python API installation
apt-get install python3-pip -y
python3 -m pip check
python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py
python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")'
if [[ "${{ runner.arch }}" == "X64" ]]; then
python3 -c 'from openvino import Core; Core().get_property("GPU", "AVAILABLE_DEVICES")'
fi
python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_PROPERTIES")'
python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_PROPERTIES")'
python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_PROPERTIES")'
python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_PROPERTIES")'
python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6'
benchmark_app --help
opt_in_out --help
ovc --help
110 changes: 104 additions & 6 deletions .github/workflows/linux.yml
@@ -760,16 +760,114 @@ jobs:
path: ${{ env.EXTENSION_BUILD_DIR }}/*.whl
if-no-files-found: 'error'

GPU_Stub:
GPU:
name: GPU ${{ matrix.TEST_TYPE }} Tests
needs: [ Build, Smart_CI ]
runs-on: ubuntu-latest
if: fromJSON(needs.smart_ci.outputs.affected_components).GPU
timeout-minutes: 80
runs-on: [ self-hosted, gpu ]
strategy:
max-parallel: 2
fail-fast: false
matrix:
TEST_TYPE: ['unit', 'func']
container:
image: ubuntu:20.04
options: --device /dev/dri:/dev/dri --group-add 109 --group-add 44
volumes:
- /dev/dri:/dev/dri
defaults:
run:
shell: bash
env:
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
INSTALL_DIR: ${{ github.workspace }}/install
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
GTEST_PARALLEL_SCRIPT: ${{ github.workspace }}/gtest_parallel.py
steps:
- name: GPU stub
- name: Download OpenVINO package
uses: actions/download-artifact@v4
with:
name: 'openvino_package'
path: ${{ env.INSTALL_DIR }}

- name: Download OpenVINO tests package
uses: actions/download-artifact@v4
with:
name: 'openvino_tests'
path: ${{ env.INSTALL_TEST_DIR }}

# Needed as ${{ github.workspace }} is not working correctly when using Docker
- name: Setup Variables
run: |
echo "This is only a stub to collect statistics of GPU runs filtered by Smart CI.
It will help us to estimate hardware requirements"
shell: bash
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
echo "GTEST_PARALLEL_SCRIPT=$GITHUB_WORKSPACE/gtest_parallel.py" >> "$GITHUB_ENV"
- name: Extract OpenVINO packages
run: |
pushd $INSTALL_DIR
tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
popd
pushd $INSTALL_TEST_DIR
tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
popd
- name: Install dependencies (Linux)
run: |
$INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y
apt-get update && apt-get install -y wget software-properties-common ca-certificates gpg-agent tzdata
env:
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
TZ: "Europe/London" # to prevent tzdata from waiting user input

- name: Setup Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}

- name: Get gtest-parallel script
run: wget https://raw.githubusercontent.com/google/gtest-parallel/master/gtest_parallel.py

- name: Install GPU Drivers
run: |
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.15985.7/intel-igc-core_1.0.15985.7_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.15985.7/intel-igc-opencl_1.0.15985.7_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.05.28454.6/intel-level-zero-gpu-dbgsym_1.3.28454.6_amd64.ddeb
wget https://github.com/intel/compute-runtime/releases/download/24.05.28454.6/intel-level-zero-gpu_1.3.28454.6_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.05.28454.6/intel-opencl-icd-dbgsym_24.05.28454.6_amd64.ddeb
wget https://github.com/intel/compute-runtime/releases/download/24.05.28454.6/intel-opencl-icd_24.05.28454.6_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.05.28454.6/libigdgmm12_22.3.11_amd64.deb
dpkg -i *.deb
#
# Tests
#

- name: OpenVINO GPU ${{ matrix.TEST_TYPE }} Tests
run: |
source ${INSTALL_DIR}/setupvars.sh
rm -rf ${INSTALL_TEST_DIR}/gpu_${{ matrix.TEST_TYPE }}_tests && mkdir -p ${INSTALL_TEST_DIR}/gpu_${{ matrix.TEST_TYPE }}_tests
test_filter=''
if [[ "${{ matrix.TEST_TYPE }}" == "unit" ]]; then
# Ticket: 138018
test_filter='-*scatter_nd_update_gpu.dynamic_padded_output*:*border_gpu.basic_zero_input*:*bicubic_zeros_no_align_data1x1*:*bicubic_border_align_batches*:*bilinear_zeros_no_align_data1x1*:*non_zero_gpu.empty_input*:*mark_shape_of_subgraphs.concat_with_empty_tensor_inputs*:*concat_cpu_impl.dynamic_4d_f*:*border_gpu.basic_zero_input_dynamic*:*network_test.model_with_empty_input_is_not_dynamic*:*bicubic_zeros_align_data1x1*'
else
test_filter='*smoke*'
fi
python3 ${GTEST_PARALLEL_SCRIPT} ${INSTALL_TEST_DIR}/ov_gpu_${{ matrix.TEST_TYPE }}_tests --dump_json_test_results=${INSTALL_TEST_DIR}/gpu_${{ matrix.TEST_TYPE }}_tests/ov_gpu_${{ matrix.TEST_TYPE }}_tests.json -- --report_unique_name --gtest_filter=$test_filter
- name: Upload Test Results
uses: actions/upload-artifact@v4
if: always()
with:
name: test-results-${{ matrix.TEST_TYPE }}-gpu
path: ${{ env.INSTALL_TEST_DIR }}/gpu_${{ matrix.TEST_TYPE }}_tests
if-no-files-found: 'error'
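
For reference, the GPU test step above corresponds roughly to the following local invocation. This is a sketch only: it assumes the openvino_package and openvino_tests archives have already been extracted into install/ and install/tests/, that gtest_parallel.py has been downloaded into the current directory, and it uses the func test type as an example.

    # hedged local equivalent of the "OpenVINO GPU func Tests" step above
    source install/setupvars.sh
    rm -rf install/tests/gpu_func_tests && mkdir -p install/tests/gpu_func_tests
    python3 gtest_parallel.py install/tests/ov_gpu_func_tests \
        --dump_json_test_results=install/tests/gpu_func_tests/ov_gpu_func_tests.json -- \
        --report_unique_name --gtest_filter='*smoke*'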

Overall_Status:
name: ci/gha_overall_status
3 changes: 3 additions & 0 deletions .github/workflows/linux_arm64.yml
@@ -125,6 +125,9 @@ jobs:
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 30
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 30
# For building the latest h5py
apt install --assume-yes --no-install-recommends libhdf5-dev
- name: Install sccache
uses: mozilla-actions/sccache-action@v0.0.4
with:
1 change: 1 addition & 0 deletions .github/workflows/linux_riscv.yml
@@ -107,6 +107,7 @@ jobs:
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/json
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/gtest
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/gflags
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/telemetry
git submodule update --init -- ${OPENVINO_REPO}/src/plugins/intel_cpu
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/open_model_zoo
popd
3 changes: 3 additions & 0 deletions .gitmodules
@@ -78,3 +78,6 @@
[submodule "src/plugins/intel_npu/thirdparty/level-zero-ext"]
path = src/plugins/intel_npu/thirdparty/level-zero-ext
url = https://github.com/intel/level-zero-npu-extensions.git
[submodule "thirdparty/telemetry"]
path = thirdparty/telemetry
url = https://github.com/openvinotoolkit/telemetry.git
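
With the new entry in place, a fresh checkout fetches the module the same way the workflow change above does. A minimal sketch, assuming it is run from the root of an already-cloned tree:

    # fetch only the newly registered telemetry submodule
    git submodule update --init -- thirdparty/telemetry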
@@ -58,12 +58,19 @@ This section explains how to convert the YOLOv4 Keras model from the `repository
python keras-YOLOv3-model-set/tools/model_converter/convert.py <path_to_cfg_file>/yolov4-tiny.cfg <path_to_weights>/yolov4-tiny.weights <saved_model_dir>
4. Run model conversion for from the TensorFlow 2 format to an IR:
4. Run model conversion from the TensorFlow 2 to an IR format:

.. note::

   Before you run the conversion, make sure you have installed all the model conversion API dependencies for TensorFlow 2.

If you get errors, you may need to add an additional step to divide the input by 255:

.. code-block:: sh

   --scale_values=image_input[255]

.. code-block:: sh

   mo --saved_model_dir yolov4 --output_dir models/IRs --input_shape [1,608,608,3] --model_name yolov4
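
Putting the two snippets together, a single conversion command with the scaling applied might look like the following. This is a sketch only; it assumes the model input tensor is named ``image_input``, as in the snippet above.

.. code-block:: sh

   # hedged sketch: the conversion command combined with the optional /255 input scaling
   mo --saved_model_dir yolov4 --output_dir models/IRs --input_shape [1,608,608,3] --model_name yolov4 --scale_values=image_input[255]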
1 change: 1 addition & 0 deletions src/bindings/js/node/CMakeLists.txt
@@ -57,6 +57,7 @@ add_library(${PROJECT_NAME} SHARED
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/resize_algorithm.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/errors.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/helper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/type_validation.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/tensor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/infer_request.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/compiled_model.cpp
3 changes: 3 additions & 0 deletions src/bindings/js/node/include/helper.hpp
@@ -140,6 +140,9 @@ ov::Tensor get_request_tensor(ov::InferRequest& infer_request, const size_t idx)
/** @brief Creates ov::tensor from TensorWrap Object */
ov::Tensor cast_to_tensor(const Napi::Value& value);

/** @brief Creates ov::tensor from Napi::CallbackInfo value at specified index. */
ov::Tensor cast_to_tensor(const Napi::CallbackInfo& info, int index);

/** @brief Creates ov::tensor from TypedArray using given shape and element type*/
ov::Tensor cast_to_tensor(const Napi::TypedArray& data, const ov::Shape& shape, const ov::element::Type_t& type);

11 changes: 11 additions & 0 deletions src/bindings/js/node/include/type_validation.hpp
@@ -0,0 +1,11 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once
#include <napi.h>

#include "node/include/addon.hpp"
#include "openvino/openvino.hpp"

/** @brief Checks if Napi::Value is a TensorWrap.*/
bool is_tensor(const Napi::Env& env, const Napi::Value& value);
9 changes: 9 additions & 0 deletions src/bindings/js/node/src/helper.cpp
@@ -4,6 +4,7 @@
#include "node/include/helper.hpp"

#include "node/include/tensor.hpp"
#include "node/include/type_validation.hpp"

const std::vector<std::string>& get_supported_types() {
static const std::vector<std::string> supported_element_types =
@@ -303,6 +304,14 @@ ov::Tensor cast_to_tensor(const Napi::Value& value) {
}
}

ov::Tensor cast_to_tensor(const Napi::CallbackInfo& info, int index) {
if (!is_tensor(info.Env(), info[index])) {
OPENVINO_THROW(std::string("Argument #" + std::to_string(index) + " must be a Tensor."));
}
const auto tensor_wrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[index].ToObject());
return tensor_wrap->get_tensor();
}

ov::Tensor cast_to_tensor(const Napi::TypedArray& typed_array,
const ov::Shape& shape,
const ov::element::Type_t& type) {
56 changes: 31 additions & 25 deletions src/bindings/js/node/src/infer_request.cpp
@@ -53,39 +53,45 @@ Napi::Object InferRequestWrap::wrap(Napi::Env env, ov::InferRequest infer_reques
}

void InferRequestWrap::set_tensor(const Napi::CallbackInfo& info) {
if (info.Length() != 2 || !info[0].IsString() || !info[1].IsObject()) {
reportError(info.Env(), "InferRequest.setTensor() invalid argument.");
} else {
std::string name = info[0].ToString();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_tensor(name, tensorWrap->get_tensor());
try {
if (info.Length() != 2 || !info[0].IsString() || !info[1].IsObject()) {
OPENVINO_THROW(std::string("InferRequest.setTensor() invalid argument."));
} else {
const std::string& name = info[0].ToString();
_infer_request.set_tensor(name, cast_to_tensor(info, 1));
}
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
}

void InferRequestWrap::set_input_tensor(const Napi::CallbackInfo& info) {
if (info.Length() == 1 && info[0].IsObject()) {
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[0].ToObject());
_infer_request.set_input_tensor(tensorWrap->get_tensor());
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
auto idx = info[0].ToNumber().Int32Value();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_input_tensor(idx, tensorWrap->get_tensor());
} else {
reportError(info.Env(), "InferRequest.setInputTensor() invalid argument.");
try {
if (info.Length() == 1 && info[0].IsObject()) {
_infer_request.set_input_tensor(cast_to_tensor(info, 0));
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
const auto idx = info[0].ToNumber().Int32Value();
_infer_request.set_input_tensor(idx, cast_to_tensor(info, 1));
} else {
OPENVINO_THROW(std::string("InferRequest.setInputTensor() invalid argument."));
}
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
}

void InferRequestWrap::set_output_tensor(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[0].ToObject());
auto t = tensorWrap->get_tensor();
_infer_request.set_output_tensor(t);
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
auto idx = info[0].ToNumber().Int32Value();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_output_tensor(idx, tensorWrap->get_tensor());
} else {
reportError(info.Env(), "InferRequest.setOutputTensor() invalid argument.");
try {
if (info.Length() == 1 && info[0].IsObject()) {
_infer_request.set_output_tensor(cast_to_tensor(info, 0));
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
const auto idx = info[0].ToNumber().Int32Value();
_infer_request.set_output_tensor(idx, cast_to_tensor(info, 1));
} else {
OPENVINO_THROW(std::string("InferRequest.setOutputTensor() invalid argument."));
}
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
}

9 changes: 9 additions & 0 deletions src/bindings/js/node/src/type_validation.cpp
@@ -0,0 +1,9 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "node/include/type_validation.hpp"

bool is_tensor(const Napi::Env& env, const Napi::Value& value) {
const auto& prototype = env.GetInstanceData<AddonData>()->tensor;
return value.ToObject().InstanceOf(prototype.Value().As<Napi::Function>());
}