Merge branch 'master' into as/npuw_parallel_for_improve
smirnov-alexey authored Oct 3, 2024
2 parents 2b4de53 + 1b892bf commit 9892b50
Showing 12 changed files with 72 additions and 62 deletions.
39 changes: 14 additions & 25 deletions .github/workflows/job_pytorch_layer_tests.yml
@@ -7,10 +7,6 @@ on:
description: 'Machine on which the tests would run'
type: string
required: true
shell:
description: "shell to override the default shell settings in the runner's operating system."
type: string
required: true
container:
description: 'JSON to be converted to the value of the "container" configuration for the job'
type: string
@@ -20,12 +16,15 @@ on:
description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
type: string
required: true
python-version:
description: 'Python version to setup. E.g., "3.11"'
type: string
required: true

permissions: read-all

env:
PIP_CACHE_PATH: /mount/caches/pip/linux
PYTHON_VERSION: '3.11'

jobs:
PyTorch_Layer_Tests:
@@ -35,7 +34,7 @@ jobs:
container: ${{ fromJSON(inputs.container) }}
defaults:
run:
shell: ${{ inputs.shell }}
shell: ${{ contains(inputs.runner, 'win') && 'pwsh' || 'bash' }}
env:
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
OPENVINO_REPO: ${{ github.workspace }}/openvino
@@ -55,12 +54,6 @@ jobs:
name: openvino_tests
path: ${{ env.INSTALL_TEST_DIR }}

- name: Download OpenVINO tokenizers extension
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: openvino_tokenizers_wheel
path: ${{ env.INSTALL_DIR }}

# Needed as ${{ github.workspace }} is not working correctly when using Docker
- name: Setup Variables
if: runner.os != 'Windows'
@@ -98,10 +91,10 @@ jobs:
sparse-checkout-cone-mode: false
path: 'openvino'

- name: Setup Python ${{ env.PYTHON_VERSION }}
- name: Setup Python ${{ inputs.python-version }}
uses: ./openvino/.github/actions/setup_python
with:
version: ${{ env.PYTHON_VERSION }}
version: ${{ inputs.python-version }}
pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH || '' }}
should-setup-pip-paths: ${{ runner.os == 'Linux' }}
self-hosted-runner: ${{ runner.os == 'Linux' }}
@@ -112,43 +105,39 @@ jobs:
# Install the core OV wheel
python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl
# Install the core OV Tokenizers wheel
python3 -m pip install ${INSTALL_DIR}/openvino_tokenizers-*.whl
- name: Install OpenVINO Python wheels (Windows)
if: runner.os == 'Windows'
run: |
# Find and install the core OV wheel
$ovCoreWheelPath=Get-ChildItem -Path ${{ env.INSTALL_DIR }}\tools -Filter openvino-*.whl | % { $_.FullName }
python3 -m pip install "$ovCoreWheelPath"
# Find and install the core OV Tokenizers wheel
$ovCoreWheelPath=Get-ChildItem -Path ${{ env.INSTALL_DIR }} -Filter openvino_tokenizers-*.whl | % { $_.FullName }
python3 -m pip install "$ovCoreWheelPath"
- name: Install Pytorch Layer tests dependencies
run: |
# pytorch test requirements
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/requirements_pytorch
- name: PyTorch Layer Tests
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287, 142196
run: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -n logical -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
# due to CVS-152795, parallel run is not possible on Windows
run: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests ${PARALLEL} -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
env:
TEST_DEVICE: CPU
TEST_PRECISION: FP32
PARALLEL: ${{ runner.os == 'Windows' && ' ' || '-n logical'}}

- name: PyTorch torch.export Layer Tests
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' && runner.os != 'Windows' }} # Ticket: 126287
run: |
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -n logical -m precommit_torch_export --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests ${PARALLEL} -m precommit_torch_export --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
env:
TEST_DEVICE: CPU
TEST_PRECISION: FP32
PYTORCH_TRACING_MODE: EXPORT
PARALLEL: ${{ runner.os == 'Windows' && ' ' || '-n logical'}}

- name: PyTorch torch.compile TORCHFX Layer Tests
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' && runner.arch != 'ARM64' }} # Ticket: 126287
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' && runner.arch != 'ARM64' && runner.os != 'Windows' }} # Ticket: 126287
run: |
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit_fx_backend --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
env:
15 changes: 7 additions & 8 deletions .github/workflows/job_tensorflow_layer_tests.yml
@@ -7,10 +7,6 @@ on:
description: 'Machine on which the tests would run'
type: string
required: true
shell:
description: "shell to override the default shell settings in the runner's operating system."
type: string
required: true
container:
description: 'JSON to be converted to the value of the "container" configuration for the job'
type: string
@@ -20,12 +16,15 @@ on:
description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
type: string
required: true
python-version:
description: 'Python version to setup. E.g., "3.11"'
type: string
required: true

permissions: read-all

env:
PIP_CACHE_PATH: /mount/caches/pip/linux
PYTHON_VERSION: '3.11'

jobs:
TensorFlow_Layer_Tests:
@@ -35,7 +34,7 @@ jobs:
container: ${{ fromJSON(inputs.container) }}
defaults:
run:
shell: ${{ inputs.shell }}
shell: ${{ contains(inputs.runner, 'win') && 'pwsh' || 'bash' }}
env:
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
OPENVINO_REPO: ${{ github.workspace }}/openvino
@@ -98,10 +97,10 @@ jobs:
sparse-checkout-cone-mode: false
path: 'openvino'

- name: Setup Python ${{ env.PYTHON_VERSION }}
- name: Setup Python ${{ inputs.python-version }}
uses: ./openvino/.github/actions/setup_python
with:
version: ${{ env.PYTHON_VERSION }}
version: ${{ inputs.python-version }}
pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH || '' }}
should-setup-pip-paths: ${{ runner.os == 'Linux' }}
self-hosted-runner: ${{ runner.os == 'Linux' }}
6 changes: 3 additions & 3 deletions .github/workflows/linux_arm64.yml
@@ -173,19 +173,19 @@ jobs:
uses: ./.github/workflows/job_tensorflow_layer_tests.yml
with:
runner: 'aks-linux-16-cores-arm'
shell: bash
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_20_04_arm64 }}", "volumes": ["/mount:/mount"]}'
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Build, Docker, Smart_CI, Openvino_tokenizers ]
needs: [ Build, Docker, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'aks-linux-16-cores-arm'
shell: bash
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_20_04_arm64 }}", "volumes": ["/mount:/mount"]}'
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

CPU_Functional_Tests:
name: CPU functional tests
6 changes: 3 additions & 3 deletions .github/workflows/mac.yml
@@ -276,17 +276,17 @@ jobs:
uses: ./.github/workflows/job_tensorflow_layer_tests.yml
with:
runner: 'macos-13'
shell: bash
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Build, Smart_CI, Openvino_tokenizers ]
needs: [ Build, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'macos-13'
shell: bash
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

CPU_Functional_Tests:
name: CPU functional tests
6 changes: 3 additions & 3 deletions .github/workflows/mac_arm64.yml
@@ -275,17 +275,17 @@ jobs:
uses: ./.github/workflows/job_tensorflow_layer_tests.yml
with:
runner: 'macos-13-xlarge'
shell: bash
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Build, Smart_CI, Openvino_tokenizers ]
needs: [ Build, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'macos-13-xlarge'
shell: bash
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

CPU_Functional_Tests:
name: CPU functional tests
6 changes: 3 additions & 3 deletions .github/workflows/ubuntu_22.yml
@@ -305,19 +305,19 @@ jobs:
uses: ./.github/workflows/job_tensorflow_layer_tests.yml
with:
runner: 'aks-linux-4-cores-16gb'
shell: bash
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_22_04_x64 }}", "volumes": ["/mount:/mount"]}'
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Docker, Build, Smart_CI, Openvino_tokenizers ]
needs: [ Docker, Build, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'aks-linux-4-cores-16gb'
shell: bash
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_22_04_x64 }}", "volumes": ["/mount:/mount"]}'
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

CPU_Functional_Tests:
name: CPU functional tests
10 changes: 10 additions & 0 deletions .github/workflows/ubuntu_24.yml
@@ -133,6 +133,16 @@ jobs:
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.12'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Docker, Build, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'aks-linux-4-cores-16gb'
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_24_04_x64 }}", "volumes": ["/mount:/mount"]}'
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.12'

Overall_Status:
name: ci/gha_overall_status_ubuntu_24
needs: [Smart_CI, Build, Debian_Packages, Samples, Python_Unit_Tests]
6 changes: 3 additions & 3 deletions .github/workflows/windows_vs2019_release.yml
@@ -404,17 +404,17 @@ jobs:
uses: ./.github/workflows/job_tensorflow_layer_tests.yml
with:
runner: 'aks-win-8-cores-16gb'
shell: pwsh
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Pytorch_Layer_Tests:
name: Pytorch Layer Tests
needs: [ Build, Smart_CI, Openvino_tokenizers ]
needs: [ Build, Smart_CI ]
uses: ./.github/workflows/job_pytorch_layer_tests.yml
with:
runner: 'aks-win-8-cores-16gb'
shell: pwsh
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

CXX_Unit_Tests:
name: C++ unit tests
@@ -534,6 +534,7 @@ JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_para
size_t tile_k_ofm_packed = tile_k_ofm;
size_t quantize_grp_size = get_dynamic_quantize_group_size(params);

bool add_decompress_scale_post_op = false;
WeightsType weights_dt = params.weights.GetDType();
if (weights_dt == WeightsType::UINT4 || weights_dt == WeightsType::INT4) {
tile_k_ofm_packed /= 2;
@@ -542,7 +543,7 @@ JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_para
const size_t scale_group_size = params.weights.IFM().v / params.decompression_scale.Feature().v;
// Do not use SCALE_POST_OP for SLM kernel, since it demonstrates worse performance
if (scale_group_size % simd == 0 && !dispatchData.use_slm)
jit.AddConstant(MakeJitConstant("DECOMPRESSION_SCALE_POST_OP", 1));
add_decompress_scale_post_op = true;
}
if (params.weights.GetLayout() == WeightsLayout::os_is_yx_osv32_isv2) {
jit.AddConstant(MakeJitConstant("W_IDX", "fi * TILE_K + kii"));
@@ -619,6 +620,8 @@ JitConstants FullyConnected_bf_tiled::GetJitConstants(const fully_connected_para
jit.AddConstant(MakeJitConstant("DQ_TYPE", "char"));
jit.AddConstant(MakeJitConstant("QUANTIZE_GROUP_SIZE", quantize_grp_size));
} else {
if (add_decompress_scale_post_op)
jit.AddConstant(MakeJitConstant("DECOMPRESSION_SCALE_POST_OP", 1));
jit.AddConstant(MakeJitConstant("DYNAMIC_QUANTIZE", 0));
jit.AddConstant(MakeJitConstant("QUANTIZE_GROUP_SIZE", min_quantize_grp_size));
}
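Context for the kernel change above: the DECOMPRESSION_SCALE_POST_OP JIT constant is no longer emitted as soon as the scale-group-size check passes; instead a flag is recorded and the constant is only added on the non-dynamically-quantized branch. The following is a minimal Python sketch of that reordered control flow, under simplified assumptions — the names and the boolean inputs are hypothetical and it is not the actual kernel-selector C++ API.

```python
def build_jit_constants(weights_4bit, scale_group_size, simd, use_slm, dynamic_quantize):
    """Simplified model of the reordered DECOMPRESSION_SCALE_POST_OP decision.

    Hypothetical helper mirroring the control flow in
    FullyConnected_bf_tiled::GetJitConstants; it is not the real implementation.
    """
    jit = {}
    add_decompress_scale_post_op = False

    if weights_4bit and scale_group_size % simd == 0 and not use_slm:
        # Before the change, the constant was added right here, even when the
        # dynamically-quantized path was taken later on.
        add_decompress_scale_post_op = True

    if dynamic_quantize:
        jit["DYNAMIC_QUANTIZE"] = 1
    else:
        # After the change, the constant is emitted only on the
        # non-dynamically-quantized path.
        if add_decompress_scale_post_op:
            jit["DECOMPRESSION_SCALE_POST_OP"] = 1
        jit["DYNAMIC_QUANTIZE"] = 0

    return jit


print(build_jit_constants(True, 64, 16, False, dynamic_quantize=True))
# -> {'DYNAMIC_QUANTIZE': 1}  (scale post-op constant deferred and dropped)
```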
6 changes: 5 additions & 1 deletion tests/layer_tests/pytorch_tests/test_bitwise_ops.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
import torch
from packaging import version

from pytorch_layer_test_class import PytorchLayerTest, skip_if_export


@@ -69,10 +71,12 @@ def forward_not_out(self, tensor_a, out):
)
@pytest.mark.parametrize("out", [False, skip_if_export(True)])
def test_bitwise_mixed_dtypes(
self, op_type, out, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version
self, op_type, out, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version
):
if ie_device == "GPU" and (lhs_dtype != "bool" or rhs_dtype != "bool"):
pytest.xfail(reason="bitwise ops are not supported on GPU")
if out and version.parse(np.__version__) >= version.parse("2.0.0"):
pytest.xfail(reason="CVS-154082: incorrect handling out type")
self._test(
*self.create_model(op_type, out),
ie_device,
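The test change above adds an xfail guard that compares the installed NumPy version against 2.0.0 with packaging.version, because the out= variant is handled incorrectly under NumPy 2.x (ticket CVS-154082). A self-contained sketch of the same guard pattern, with a hypothetical test body standing in for the real one:

```python
import numpy as np
import pytest
from packaging import version


def numpy2_or_newer() -> bool:
    """True when the installed NumPy is 2.0.0 or newer."""
    return version.parse(np.__version__) >= version.parse("2.0.0")


@pytest.mark.parametrize("out", [False, True])
def test_example_with_out_guard(out):
    # Same shape of guard as in test_bitwise_mixed_dtypes: expect failure
    # only for the out= variant when running on NumPy 2.x.
    if out and numpy2_or_newer():
        pytest.xfail(reason="out= handling is broken on NumPy 2.x (CVS-154082)")
    assert np.bitwise_and(np.uint8(3), np.int32(5)) == 1
```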
26 changes: 15 additions & 11 deletions tests/requirements_pytorch
@@ -1,10 +1,14 @@
# test ovc with NumPy 2.x on Ubuntu 24 with default Python 3.12
# test against NumPy 1.x with older Python versions
# optimum still requires numpy<2.0.0
numpy==1.26.4
numpy==1.26.4; python_version < "3.12"
numpy==2.1.1; python_version >= "3.12"
torch==2.4.1; platform_system != "Darwin" or platform_machine != "x86_64"
torch==2.2.0; platform_system == "Darwin" and platform_machine == "x86_64"
torch==2.2.2; platform_system == "Darwin" and platform_machine == "x86_64"
--extra-index-url https://download.pytorch.org/whl/cpu

torchvision==0.19.1
torchvision==0.19.1; platform_system != "Darwin" or platform_machine != "x86_64"
torchvision==0.17.2; platform_system == "Darwin" and platform_machine == "x86_64"
# transformers 4.45.1 is available
# but optimum still requires <4.45.0
transformers==4.44.2
@@ -13,30 +17,30 @@ pytest-html==4.1.1
pytest-xdist[psutil]==3.6.1
defusedxml==0.7.1

auto-gptq==0.7.1; platform_system == "Linux" and platform_machine == "x86_64"
auto-gptq==0.7.1; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.12"
av==13.0.0
basicsr==1.4.2
basicsr==1.4.2; python_version < "3.12"
datasets==3.0.1
easyocr==1.7.2
facexlib==0.3.0
librosa==0.10.2
optimum==1.22.0
facexlib==0.3.0; python_version < "3.12"
librosa==0.10.2; python_version < "3.12"
optimum==1.22.0; python_version < "3.12"
packaging==24.1
pandas==2.2.3
protobuf==5.28.2
pyctcdecode==0.5.0
pyctcdecode==0.5.0; python_version < "3.12"
sacremoses==0.1.1
sentencepiece==0.2.0
soundfile==0.12.1
super-image==0.1.7
super-image==0.1.7; python_version < "3.12"
timm==1.0.8
torchaudio==2.4.1
wheel==0.44.0
PyYAML==6.0.2
kornia==0.7.3

# use latest released version once it's available
git+https://github.com/huggingface/optimum-intel.git@main
git+https://github.com/huggingface/optimum-intel.git@main; python_version < "3.12"
# set 'export HF_HUB_ENABLE_HF_TRANSFER=1' to benefits from hf_transfer
hf_transfer==0.1.8

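The requirements change above attaches `python_version` environment markers so that NumPy 2.x is only installed with the default Python 3.12 on Ubuntu 24, while packages that still need NumPy 1.x (optimum, basicsr, facexlib, and others) are skipped there. As a rough illustration of how such markers are evaluated, here is a small sketch using packaging.markers (which is how pip-style markers can be checked against the current interpreter; the specific requirement/marker pairs are copied from the file above):

```python
from packaging.markers import Marker

# Marker strings as they appear in tests/requirements_pytorch; pip evaluates
# them against the interpreter that performs the install.
requirements = {
    "numpy==1.26.4": 'python_version < "3.12"',
    "numpy==2.1.1": 'python_version >= "3.12"',
    "optimum==1.22.0": 'python_version < "3.12"',
    "torch==2.2.2": 'platform_system == "Darwin" and platform_machine == "x86_64"',
}

for req, marker in requirements.items():
    # Marker.evaluate() uses the current environment by default; an explicit
    # environment dict can be passed to simulate another interpreter.
    selected = Marker(marker).evaluate()
    print(f"{req:16s} {marker:60s} -> {'install' if selected else 'skip'}")
```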