diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst
index b06528d3b27dea..8f8f66a2e51d8a 100644
--- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst
+++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst
@@ -169,6 +169,7 @@ Table of Contents
 * :doc:`RNNCell <../operation-specs/sequence/rnn-cell-3>`
 * :doc:`RNNSequence <../operation-specs/sequence/rnn-sequence-5>`
 * :doc:`ROIAlign <../operation-specs/detection/roi-align-9>`
+* :doc:`ROIAlignRotated <../operation-specs/detection/roi-align-rotated-14>`
 * :doc:`ROIPooling <../operation-specs/detection/roi-pooling-1>`
 * :doc:`Roll <../operation-specs/movement/roll-7>`
 * :doc:`Round <../operation-specs/arithmetic/round-5>`
diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst
index 250ef955bb41a8..a39de0b72d5a8e 100644
--- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst
+++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst
@@ -188,6 +188,7 @@ Operation Specifications
    RNNSequence-5
    ROIAlign-3
    ROIAlign-9
+   ROIAlignRotated-14
    ROIPooling-1
    Roll-7
    Round-5
diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/detection/roi-align-rotated-14.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/detection/roi-align-rotated-14.rst
new file mode 100644
index 00000000000000..7ec8acdd2238b6
--- /dev/null
+++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/detection/roi-align-rotated-14.rst
@@ -0,0 +1,123 @@
+.. {#openvino_docs_ops_detection_ROIAlignRotated_14}
+
+ROIAlignRotated
+===============
+
+
+.. meta::
+  :description: Learn about ROIAlignRotated-14 - an object detection operation,
+                which can be performed on three required input tensors.
+
+
+**Versioned name**: *ROIAlignRotated-14*
+
+**Category**: *Object detection*
+
+**Short description**: *ROIAlignRotated* is a *pooling layer* used over feature maps of non-uniform input sizes. It outputs a feature map of a fixed size.
+
+**Detailed description**: `Reference `__.
+
+*ROIAlignRotated* performs the following for each Region of Interest (ROI), for each input feature map:
+
+1. Multiply the ROI box coordinates by *spatial_scale* to produce box coordinates relative to the input feature map size.
+2. Rotate the ROI box by the given angle in radians, in the direction determined by *clockwise_mode*.
+3. Divide the box into equal bins. Each bin is mapped to a single output feature map element.
+4. Inside every bin, calculate regularly spaced sample points, according to the *sampling_ratio* attribute.
+5. To calculate the value of a single sample point, take the 4 points around it and apply bilinear interpolation.
+6. Average all sample points in the bin to produce the output feature map element.
+
+The 4 points used for bilinear interpolation are the closest integer coordinates to the sample point.
+For example, if the sample point is [2.14, 3.56], the 4 integer points are [2, 3], [2, 4], [3, 3], [3, 4].
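+
+For illustration, the bilinear sampling described above can be sketched in NumPy (a minimal
+sketch, not the normative reference implementation; the function name and the zero handling of
+out-of-bounds points are choices made for this example only):
+
+.. code-block:: python
+
+   import numpy as np
+
+   def bilinear_sample(fmap, x, y):
+       # The 4 closest integer points around the sample point (x, y).
+       x0, y0 = int(np.floor(x)), int(np.floor(y))
+       x1, y1 = x0 + 1, y0 + 1
+       lx, ly = x - x0, y - y0  # fractional offsets used as interpolation weights
+
+       def at(cx, cy):  # out-of-bounds points contribute zero in this sketch
+           h, w = fmap.shape
+           return fmap[cy, cx] if 0 <= cx < w and 0 <= cy < h else 0.0
+
+       return (at(x0, y0) * (1 - lx) * (1 - ly) + at(x1, y0) * lx * (1 - ly) +
+               at(x0, y1) * (1 - lx) * ly + at(x1, y1) * lx * ly)
+
+   fmap = np.arange(36, dtype=np.float32).reshape(6, 6)
+   # The sample point [2.14, 3.56] from the example above uses the integer
+   # points [2, 3], [2, 4], [3, 3], [3, 4]:
+   print(bilinear_sample(fmap, 2.14, 3.56))  # 23.5 for this feature map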
+
+Each ROI box's center is shifted by [-0.5, -0.5] before pooling to achieve better alignment with the closest integer coordinates used for bilinear filtering.
+
+**Attributes**
+
+* *pooled_h*
+
+  * **Description**: *pooled_h* is the height of the ROI output feature map.
+  * **Range of values**: a positive integer
+  * **Type**: ``int``
+  * **Required**: *yes*
+
+* *pooled_w*
+
+  * **Description**: *pooled_w* is the width of the ROI output feature map.
+  * **Range of values**: a positive integer
+  * **Type**: ``int``
+  * **Required**: *yes*
+
+* *sampling_ratio*
+
+  * **Description**: *sampling_ratio* is the number of sample points per bin, over height and width, used to calculate each output feature map element. If the value is greater than 0, then ``bin_points_h = sampling_ratio`` and ``bin_points_w = sampling_ratio``. If the value is equal to 0, an adaptive number of sample points over height and width is used: ``bin_points_h = ceil(roi_height / pooled_h)`` and ``bin_points_w = ceil(roi_width / pooled_w)``, respectively. The total number of sample points for a single bin is equal to ``bin_points_w * bin_points_h``.
+  * **Range of values**: a non-negative integer
+  * **Type**: ``int``
+  * **Required**: *yes*
+
+* *spatial_scale*
+
+  * **Description**: *spatial_scale* is a multiplicative spatial scale factor that is applied to the ROI box (height, width, and center vector) before pooling.
+    WARNING!
+    The spatial scale is also applied to the center point of the ROI box, so scaling changes not only the size of the ROI box, but also its position.
+    For example, if the spatial scale is 2.0, the ROI box center is [0.5, 0.5], the box width is 1.0, and the box height is 1.0, then after scaling the ROI box center will be [1.0, 1.0], the box width will be 2.0, and the box height will be 2.0.
+  * **Range of values**: a positive floating-point number
+  * **Type**: ``float``
+  * **Required**: *yes*
+
+* *clockwise_mode*
+
+  * **Description**: If True, the angle for each ROI represents a clockwise rotation; otherwise, a counterclockwise rotation.
+  * **Type**: ``bool``
+  * **Default value**: False
+  * **Required**: *no*
+
+**Inputs**:
+
+* **1**: 4D input tensor of shape ``[N, C, H, W]`` with feature maps of type *T*. **Required.**
+
+* **2**: 2D input tensor of shape ``[NUM_ROIS, 5]`` of type *T*, describing the ROI boxes as 5-element tuples ``[center_x, center_y, width, height, angle]`` in relative coordinates. The angle is always in radians. **Required.**
+
+* **3**: 1D input tensor of shape ``[NUM_ROIS]`` with batch indices of type *IND_T*. **Required.**
+
+**Outputs**:
+
+* **1**: 4D output tensor of shape ``[NUM_ROIS, C, pooled_h, pooled_w]`` with feature maps of type *T*.
+
+**Types**
+
+* *T*: any supported floating-point type.
+
+* *IND_T*: any supported integer type.
+
+
+**Example**
+
+.. code-block:: xml
+   :force:
+
+   <layer ... type="ROIAlignRotated" ... >
+       <data pooled_h="6" pooled_w="6" spatial_scale="16.0" sampling_ratio="2" clockwise_mode="true"/>
+       <input>
+           <port id="0">
+               <dim>7</dim>
+               <dim>256</dim>
+               <dim>200</dim>
+               <dim>200</dim>
+           </port>
+           <port id="1">
+               <dim>1000</dim>
+               <dim>5</dim>
+           </port>
+           <port id="2">
+               <dim>1000</dim>
+           </port>
+       </input>
+       <output>
+           <port id="3" precision="FP32">
+               <dim>1000</dim>
+               <dim>256</dim>
+               <dim>6</dim>
+               <dim>6</dim>
+           </port>
+       </output>
+   </layer>
diff --git a/docs/home.rst b/docs/home.rst
index 6812ee27f3f804..adba092ae44892 100644
--- a/docs/home.rst
+++ b/docs/home.rst
@@ -1,131 +1,178 @@
 ============================
-OpenVINO 2024
+OpenVINO 2024.0
 ============================
 
 .. meta::
    :google-site-verification: _YqumYQ98cmXUTwtzM_0WIIadtDc6r_TMYGbmGgNvrk
 
+
+**OpenVINO is an open-source toolkit** for optimizing and deploying deep learning models from cloud
+to edge.
It accelerates deep learning inference across various use cases, such as generative AI, video, +audio, and language with models from popular frameworks like PyTorch, TensorFlow, ONNX, and more. +Convert and optimize models, and deploy across a mix of Intel® hardware and environments, on-premises +and on-device, in the browser or in the cloud. -.. raw:: html - - - +Check out the `OpenVINO Cheat Sheet. `__ .. container:: :name: ov-homepage-banner - OpenVINO 2024.0 - .. raw:: html +
    -
  • An open-source toolkit for optimizing and deploying deep learning models.
    Boost your AI deep-learning inference performance!
  • - -
  • Better OpenVINO integration with PyTorch!
    Use PyTorch models directly, without converting them first.
    - Learn more... +
  • +

    An open-source toolkit for optimizing and deploying deep learning models.

    +

    Boost your AI deep-learning inference performance!

    + Learn more
  • -
  • OpenVINO via PyTorch 2.0 torch.compile()
    Use OpenVINO directly in PyTorch-native applications!
    - Learn more... +
  • +

    Better OpenVINO integration with PyTorch!

    +

    Use PyTorch models directly, without converting them first.

    + Learn more
  • -
  • Do you like Generative AI? You will love how it performs with OpenVINO!
    - Check out our new notebooks... -
+
  • +

    OpenVINO via PyTorch 2.0 torch.compile()

    +

    Use OpenVINO directly in PyTorch-native applications!

    + Learn more +
  • +
  • +

    Do you like Generative AI?

    +

    You will love how it performs with OpenVINO!

    + Check out our new notebooks +
  • +
  • +

    Boost your AI deep learning inference performance.

    +

    Use Intel's open-source OpenVINO toolkit for optimizing and deploying deep learning models.

    + Learn more +
  • +
    - .. button-ref:: get-started - :ref-type: doc - :class: ov-homepage-banner-btn - :color: primary - :outline: +| +| - Get started +.. image:: _static/images/openvino-overview-diagram.jpg + :align: center + :alt: openvino diagram -.. rst-class:: openvino-diagram - - .. image:: _static/images/ov_homepage_diagram.png - :align: center +| +Places to Begin +++++++++++++++++++++++++++++ .. grid:: 2 2 3 3 :class-container: ov-homepage-higlight-grid - .. grid-item-card:: Performance Benchmarks - :link: about-openvino/performance-benchmarks - :link-alt: performance benchmarks - :link-type: doc - - See latest benchmark numbers for OpenVINO and OpenVINO Model Server - - .. grid-item-card:: Work with Multiple Model Formats - :link: openvino-workflow/model-preparation - :link-alt: Supported Model Formats - :link-type: doc - - OpenVINO supports different model formats: PyTorch, TensorFlow, TensorFlow Lite, ONNX, and PaddlePaddle. - - .. grid-item-card:: Deploy at Scale with OpenVINO Model Server - :link: ovms_what_is_openvino_model_server - :link-alt: model server - :link-type: doc - - Cloud-ready deployments for microservice applications - - .. grid-item-card:: Optimize Models - :link: openvino-workflow/model-optimization - :link-alt: model optimization - :link-type: doc - - Boost performance using quantization and compression with NNCF + .. grid-item-card:: Installation + :img-top: ./_static/images/home_begin_tile_01.png + :class-card: homepage_begin_tile + + This guide introduces installation and learning materials for Intel® Distribution of OpenVINO™ toolkit. + + .. button-link:: get-started/install-openvino.html + :color: primary + :outline: - .. grid-item-card:: Use OpenVINO with PyTorch Apps with torch.compile() - :link: openvino-workflow/torch-compile - :link-alt: torch.compile - :link-type: doc + Get Started - Optimize generation of the graph model with PyTorch 2.0 torch.compile() backend - - .. grid-item-card:: Optimize and Deploy Generative AI - :link: learn-openvino/llm_inference_guide - :link-alt: gen ai - :link-type: doc - - Enhance the efficiency of Generative AI - - -Feature Overview -############################## - -.. grid:: 1 2 2 2 - :class-container: ov-homepage-feature-grid - - .. grid-item-card:: Local Inference & Model Serving - - You can either link directly with OpenVINO Runtime to run inference locally or use OpenVINO Model Server - to serve model inference from a separate server or within Kubernetes environment - - .. grid-item-card:: Improved Application Portability - - Write an application once, deploy it anywhere, achieving maximum performance from hardware. Automatic device - discovery allows for superior deployment flexibility. OpenVINO Runtime supports Linux, Windows and MacOS and - provides Python, C++ and C API. Use your preferred language and OS. - - .. grid-item-card:: Minimal External Dependencies - - Designed with minimal external dependencies reduces the application footprint, simplifying installation and - dependency management. Popular package managers enable application dependencies to be easily installed and - upgraded. Custom compilation for your specific model(s) further reduces final binary size. + .. grid-item-card:: Performance Benchmarks + :img-top: ./_static/images/home_begin_tile_02.png + :class-card: homepage_begin_tile + + See latest benchmark numbers for OpenVINO and OpenVINO Model Server. + + .. button-link:: about-openvino/performance-benchmarks.html + :color: primary + :outline: + + View data + + .. 
grid-item-card:: Framework Compatibility
+      :img-top: ./_static/images/home_begin_tile_03.png
+      :class-card: homepage_begin_tile
+
+      Load models directly (for TensorFlow, ONNX, PaddlePaddle) or convert them to the OpenVINO format.
+
+      .. button-link:: openvino-workflow/model-preparation.html
+         :color: primary
+         :outline:
+
+         Load your model
+
+   .. grid-item-card:: Easy Deployment
+      :img-top: ./_static/images/home_begin_tile_04.png
+      :class-card: homepage_begin_tile
+
+      Get started in just a few lines of code.
+
+      .. button-link:: openvino-workflow/running-inference.html
+         :color: primary
+         :outline:
+
+         Run Inference
+
+   .. grid-item-card:: Serving at Scale
+      :img-top: ./_static/images/home_begin_tile_05.png
+      :class-card: homepage_begin_tile
+
+      Cloud-ready deployments for microservice applications.
+
+      .. button-link:: openvino-workflow/running-inference.html
+         :color: primary
+         :outline:
+
+         Try it out
+
+   .. grid-item-card:: Model Compression
+      :img-top: ./_static/images/home_begin_tile_06.png
+      :class-card: homepage_begin_tile
+
+      Boost performance with post-training and training-time compression with NNCF.
+
+      .. button-link:: openvino-workflow/model-optimization.html
+         :color: primary
+         :outline:
+
+         Optimize now
+
+|
+
+Key Features
+++++++++++++++++++++++++++++
+
+
+.. grid:: 2 2 2 2
+   :class-container: homepage_begin_container
+
+   .. grid-item-card:: Local Inference & Model Serving
+      :img-top: ./_static/images/home_key_feature_01.png
+      :class-card: homepage_begin_key
+
+      You can either link directly with OpenVINO Runtime to run inference locally or use OpenVINO Model Server to serve model inference from a separate server or within a Kubernetes environment.
+
+   .. grid-item-card:: Fast & Scalable Deployment
+      :img-top: ./_static/images/home_key_feature_02.png
+      :class-card: homepage_begin_key
+
+      Write an application once, deploy it anywhere, achieving maximum performance from hardware. Automatic device discovery allows for superior deployment flexibility. OpenVINO Runtime supports Linux, Windows, and macOS and provides Python, C++, and C APIs. Use your preferred language and OS.
+
+   .. grid-item-card:: Lighter Deployment
+      :img-top: ./_static/images/home_key_feature_03.png
+      :class-card: homepage_begin_key
+
+      Designed with minimal external dependencies, OpenVINO reduces the application footprint, simplifying installation and dependency management. Popular package managers enable application dependencies to be easily installed and upgraded. Custom compilation for your specific model(s) further reduces the final binary size.
+
+   .. grid-item-card:: Enhanced App Start-Up Time
+      :img-top: ./_static/images/home_key_feature_04.png
+      :class-card: homepage_begin_key
+
+      In applications where fast start-up is required, OpenVINO significantly reduces first-inference latency by using the CPU for initial inference and then switching to another device once the model has been compiled and loaded into memory. Compiled models are cached, improving start-up time even more.

.. 
toctree:: diff --git a/docs/sphinx_setup/_static/css/custom.css b/docs/sphinx_setup/_static/css/custom.css index 7234909a2df28a..a919e9902c2daa 100644 --- a/docs/sphinx_setup/_static/css/custom.css +++ b/docs/sphinx_setup/_static/css/custom.css @@ -1164,9 +1164,3 @@ input:-webkit-autofill { -webkit-box-shadow: 0 0 0px 1000px white inset; } - -/* Splide carousel */ -.splide__slide { - margin-right: 2rem; - overflow: hidden; -} diff --git a/docs/sphinx_setup/_static/css/homepage_style.css b/docs/sphinx_setup/_static/css/homepage_style.css index e505be4088e517..773833257ab8a4 100644 --- a/docs/sphinx_setup/_static/css/homepage_style.css +++ b/docs/sphinx_setup/_static/css/homepage_style.css @@ -1,25 +1,74 @@ -/* overrides */ -.switcher-set, .prev-next-bottom, .bd-toc {display: none!important;} -#openvino-documentation > h1 { - display: none; +.bd-toc { + display: none !important; } h1 { - /*font-size: var(--pst-font-size-h2);*/ - /*margin-bottom: 3rem;*/ - display: none!important; + font-size: 60px !important; } +.homepage-begin-container { + padding: 0px; +} + +#ov-homepage-banner { + border-bottom: 0px; +} + +.ov-homepage-label { + font-size: 14px; + font-weight: bold; +} + +.ov-homepage-slide-title { + color: white !important; + font-size: 22px !important; + font-weight: lighter !important; +} + +.ov-homepage-slide-subtitle { + color: white !important; + font-size: 18px !important; + font-weight: lighter !important; +} + +.splide__pagination { + bottom: .8em !important; +} + +#ov-homepage-banner #splide01-slide01 { + background-color: #070862; + background-image: linear-gradient(350deg, #004CA9 0%, #381965 50%, #070862 100%); + padding: 32px 48px !important; +} -#ov-homepage-banner, .openvino-diagram, .ov-homepage-higlight-grid { - margin-bottom: 90px!important; +#ov-homepage-banner #splide01-slide02 { + background-color: #034CAA; + background-image: linear-gradient(270deg, #034CAA 00%, #4B9D77 50%, #034CAA 100%); + padding: 32px 48px !important; +} + +#ov-homepage-banner #splide01-slide03 { + background-color: #030B5E; + background-image: linear-gradient(230deg, #030B5E 0%, #285455 40%, #030B5E 100%); + padding: 32px 48px !important; +} + +#ov-homepage-banner #splide01-slide04 { + background-color: #214DA4; + background-image: linear-gradient(110deg, #214DA4 0%, #03aadd 100%); + padding: 32px 48px !important; +} + +#ov-homepage-banner #splide01-slide05 { + background-color: #034CAA; + background-image: linear-gradient(350deg, #034CAA 20%, #034CAA 30%, #4B9D77 100%); + padding: 32px 48px !important; } #ov-homepage-banner { - padding: 2rem; - background-color: #76CEFF; - background-image: linear-gradient(346deg, #728EFA 0%, #76CEFF 50%, #BBE8BD 100%); - border-bottom: 5px solid #0068b5; + p { + margin: 0.4rem 0 1.2rem 0; + } } #ov-homepage-banner p:first-of-type { @@ -37,100 +86,147 @@ h1 { #ov-homepage-banner .line-block { line-height: 1.5; text-align: left; - color: #000000; + color: white; +} + +#splide01-track { + height: calc(180px) !important; } .ov-homepage-banner-btn { - transition: 0.7s; - font-weight: bold; - background-color: #0068b5; + font-size: 14px !important; + font-weight: bold !important; color: #ffffff !important; + border: 1px solid white; + padding: 10px 18px !important; } .ov-homepage-banner-btn:hover { - background-color: white!important; - color: var(--sd-color-primary)!important; + background-color: white !important; + color: var(--sd-color-primary) !important; } -#ov-homepage-banner > p:nth-child(3) { +#ov-homepage-banner>p:nth-child(3) { margin-bottom: 0; } 
#ov-homepage-banner a, #ov-homepage-banner a:visited { text-decoration: none; - color: #00A3F6; + color: white; transition: .7s; font-weight: 600; } #ov-homepage-banner a:hover { - color: #653171; + color: white; } -.openvino-diagram { - width: 65%; - margin-bottom: 3rem; +#homepage_key_container { + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + padding: 0px !important; } -@media (max-width: 720px) { - .openvino-diagram { - width: 90%; +.homepage-begin-tile { + .sd-card-img-top { + width: 80px !important; + padding: 10px; } } +.homepage-begin-tile:hover{ + box-shadow: none !important; +} + +.sd-card-img-top { + border-radius: 0; +} + +.sd-card-text { + font-size: 0.9rem !important; +} + +.homepage-begin-key { + height: 450px; + border: 0 !important; + .sd-card-body { + padding: 0% + } + .sd-card-img-top { + padding: 0px; + padding-bottom: 15px; + } +} + +.sd-btn-outline-primary { + color: #0054AE !important; + text-decoration: none !important; + background-color: #FFF !important; + border-radius: 0; + position: absolute; + bottom: 20px; +} + +.homepage-begin-tile { + border-radius: 0; + /* margin: 20px !important; */ + margin-bottom: 5px; + position: relative; + border-width: 0.2cqb; +} + +.sd-shadow-sm { + box-shadow: none !important; +} + +.homepage-begin-tile:hover { + border-color: #0054AE; +} + +.sd-btn-outline-primary:hover { + border-color: #0054AE !important; + background-color: #0054AE !important; +} + +.sd-btn-outline-primary:hover a { + color: #fff !important; + background-color: #0054AE !important; +} + .ov-homepage-higlight-grid { padding: 0; } -.ov-homepage-higlight-grid > div { - justify-content:space-evenly; - row-gap: 20px; +.ov-homepage-higlight-grid>div { + justify-content: space-evenly; + row-gap: 10px; } -.ov-homepage-higlight-grid > div > div.sd-col { - width: 230px; +.ov-homepage-higlight-grid>div>div.sd-col { + width: 280px; min-height: 300px; padding: 0; - margin-inline: 5px; + margin-inline: 0px; } -.ov-homepage-higlight-grid .sd-card { - box-shadow: 0 0 20px 5px #f3f3f3!important; - transition: 0.5s; - overflow: hidden; -} .ov-homepage-higlight-grid .sd-card-hover:hover { - border-color: var(--sd-color-card-border)!important; - transform: scale(1.00)!important; -} - -.ov-homepage-higlight-grid .sd-shadow-sm:hover { - box-shadow: 0 0 10px 2px rgba(108,36,240,0.3) !important; + border-color: var(--sd-color-card-border) !important; + transform: scale(1.00) !important; + box-shadow: none !important; } .ov-homepage-higlight-grid .sd-card-title { - height: 52.781px; + height: 10px; margin-bottom: 2rem; } .ov-homepage-higlight-grid .sd-card-text { - font-size: 0.9rem; -} - -.ov-homepage-higlight-grid .sd-card::after { - align-self: flex-end; - display: block; - content: "LEARN MORE"; - width: 100%; font-size: 0.8rem; - text-align: center; - padding-top: 0.8rem; - font-weight: 600; - color: #00A3F6; - height: 3rem; - background-color: #CDEDFF; + height: 60px; } .ov-homepage-feature-grid .sd-col { @@ -140,8 +236,7 @@ h1 { .ov-homepage-feature-grid .sd-card { border: none; - box-shadow: 0 0 20px 2px #f3f3f3!important; - /* box-shadow: none!important; */ + box-shadow: none!important; } .ov-homepage-feature-grid .sd-row { @@ -149,25 +244,13 @@ h1 { justify-content: center; } - -/* =================================================================== */ -/* @media screen and (min-width: 720px) { - main.col-xl-7.bd-content { - flex: 0 0 75%!important; - max-width: 75%!important; - } -}*/ - @media screen and (max-width: 535px) { 
.ov-homepage-feature-grid .sd-row { flex-direction: column; align-items: center; } + .ov-homepage-feature-grid .sd-col { max-width: 100%; } -} - -.sd-row { - --sd-gutter-x: 0rem!important; -} +} \ No newline at end of file diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_01.png b/docs/sphinx_setup/_static/images/home_begin_tile_01.png new file mode 100644 index 00000000000000..51a1c2ad044a94 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_01.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2851ad88d3765f49d09713bcc66c29eaa9e09d39075205fbe684de23f85483da +size 2330 diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_02.png b/docs/sphinx_setup/_static/images/home_begin_tile_02.png new file mode 100644 index 00000000000000..4aa42fe9d952bf --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_02.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b45da5ea223dab074b0b54641049b2f4a197a3e6dfa09fbad7a4ebbea482d5a0 +size 1230 diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_03.png b/docs/sphinx_setup/_static/images/home_begin_tile_03.png new file mode 100644 index 00000000000000..7b4c45d6048ef5 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_03.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb537eab14c57db2fb3a682acf1628272b75d36ab59d4e31b0e8843d03f76fd +size 3414 diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_04.png b/docs/sphinx_setup/_static/images/home_begin_tile_04.png new file mode 100644 index 00000000000000..9725419c8fa2dc --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_04.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c81a77030d7925e2e3a274e093cac2360dd7f2a59c7db4ccf040937fb7bb2002 +size 1635 diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_05.png b/docs/sphinx_setup/_static/images/home_begin_tile_05.png new file mode 100644 index 00000000000000..da5f856b0dbbc8 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_05.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:886486c7771c60ef84e92aeb134776f1660a7ad2517fe47df25448bf42fb8e40 +size 2545 diff --git a/docs/sphinx_setup/_static/images/home_begin_tile_06.png b/docs/sphinx_setup/_static/images/home_begin_tile_06.png new file mode 100644 index 00000000000000..932408ca0557e3 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_begin_tile_06.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:861fbbcb3c4439c77c03b6bf5b896748ff87c38b7f5dc086957ac3f7a3cb78ab +size 1481 diff --git a/docs/sphinx_setup/_static/images/home_key_feature_01.png b/docs/sphinx_setup/_static/images/home_key_feature_01.png new file mode 100644 index 00000000000000..2bd10f757a25d1 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_key_feature_01.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a63ad11f17a8daea3e29214fac4112e2d7e5721bb4904347055ebc0cf9edaf7a +size 759281 diff --git a/docs/sphinx_setup/_static/images/home_key_feature_02.png b/docs/sphinx_setup/_static/images/home_key_feature_02.png new file mode 100644 index 00000000000000..0d7fdc9344fc61 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_key_feature_02.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7639854861ec873b990d9f573e7b6f1a5d67dfcb1279f5e4fbd57f66504e4e0c +size 851859 diff --git 
a/docs/sphinx_setup/_static/images/home_key_feature_03.png b/docs/sphinx_setup/_static/images/home_key_feature_03.png new file mode 100644 index 00000000000000..bb4bb0f0ce1ec6 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_key_feature_03.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:088ff078d8a4238caac7f69920b55d7c6dd6f9775c934237256205237d14a8ef +size 507372 diff --git a/docs/sphinx_setup/_static/images/home_key_feature_04.png b/docs/sphinx_setup/_static/images/home_key_feature_04.png new file mode 100644 index 00000000000000..c4b0cc56dad2a3 --- /dev/null +++ b/docs/sphinx_setup/_static/images/home_key_feature_04.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4308b7ca223434b058eac2ca1a6325fb2ac332810147e0d00ec34dc1d9cccd4 +size 447695 diff --git a/docs/sphinx_setup/_static/images/openvino-overview-diagram.jpg b/docs/sphinx_setup/_static/images/openvino-overview-diagram.jpg new file mode 100644 index 00000000000000..bfd3c6533446f3 --- /dev/null +++ b/docs/sphinx_setup/_static/images/openvino-overview-diagram.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:739d604dc4b8bae082e9c70e24328bcf9c30fa3fe5b1f884b9bd129509302b4e +size 1465073 diff --git a/docs/sphinx_setup/_static/images/ov_homepage_diagram.png b/docs/sphinx_setup/_static/images/ov_homepage_diagram.png deleted file mode 100644 index 92eca6aecebf01..00000000000000 --- a/docs/sphinx_setup/_static/images/ov_homepage_diagram.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1a48358ec0e4e256d9e7ec45dfdfe7ecf0e33f1395d67905ef2b6f659c197d76 -size 132735 diff --git a/docs/sphinx_setup/_static/js/custom.js b/docs/sphinx_setup/_static/js/custom.js index 4ad51222bd1da9..e9368ebda7145c 100644 --- a/docs/sphinx_setup/_static/js/custom.js +++ b/docs/sphinx_setup/_static/js/custom.js @@ -257,12 +257,10 @@ function addFooter() { } function initSplide() { - const slides = $('.splide__slide'); - const height = (slides.length > 4) ? 
96 + ((slides.length - 4) * 16) : 96 + var splide = new Splide('.splide', { - direction : 'ttb', type : 'loop', - height : `${height}px`, + height : `230px`, perPage : 1, autoplay : true, arrows : false, diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/softmax_gpu_bf.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/softmax_gpu_bf.cl index c2ff40c796de98..b70fe7a5173acf 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/softmax_gpu_bf.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/softmax_gpu_bf.cl @@ -7,6 +7,22 @@ #include "include/batch_headers/sub_group_block_read.cl" #include "include/batch_headers/sub_group_block_write.cl" +#if IS_DYNAMIC + +#define CALC_POWER(n) ({uint pos = 0; uint i = n; do { i >>= 1; ++pos; } while (i); --pos;}) + +#define BLOCK_READ(ptr, offset) DT_INPUT_BLOCK_READ(ptr, offset) +#define BLOCK_WRITE(ptr, offset, val) DT_OUTPUT_BLOCK_WRITE(ptr, offset, val) +#define BLOCK_TYPE INPUT0_TYPE + +#define OPT_BLOCK_SIZE 8 + +#define BLOCK_READ_OPT(ptr, offset) CAT(DT_INPUT_BLOCK_READ, OPT_BLOCK_SIZE)(ptr, offset) +#define BLOCK_WRITE_OPT(ptr, offset, val) CAT(DT_OUTPUT_BLOCK_WRITE, OPT_BLOCK_SIZE)(ptr, offset, val) +#define BLOCK_TYPE_OPT MAKE_VECTOR_TYPE(INPUT0_TYPE, OPT_BLOCK_SIZE) + +#else + #if SUBGROUP_BLOCK_SIZE == 1 #define BLOCK_READ(ptr, offset) DT_INPUT_BLOCK_READ(ptr, offset) #define BLOCK_WRITE(ptr, offset, val) DT_OUTPUT_BLOCK_WRITE(ptr, offset, val) @@ -17,8 +33,6 @@ #define BLOCK_TYPE MAKE_VECTOR_TYPE(INPUT0_TYPE, SUBGROUP_BLOCK_SIZE) #endif -#if IS_DYNAMIC -#define CALC_POWER(n) ({uint pos = 0; uint i = n; do { i >>= 1; ++pos; } while (i); --pos;}) #endif REQD_SUB_GROUP_SIZE(SUB_GROUP_SIZE) @@ -64,53 +78,76 @@ KERNEL (softmax_gpu_continuous_bfyx)( const uint leftover_idx = data_set_offset + aligned_offset + workers_per_data_set * items_num + in_data_set_idx; INPUT0_TYPE my_chunk[STACK_SIZE]; - INPUT0_TYPE my_maximum = -UNIT_VAL_MAX; INPUT0_TYPE my_sum = UNIT_VAL_ZERO; __local INPUT0_TYPE lg_storage[SLM_SIZE]; // Read inputs and Get maximum value from data set uint input_idx=0; +#if IS_DYNAMIC + if (workers_per_data_set > SUB_GROUP_SIZE) + { + const uint num_iters = items_num - (items_num % OPT_BLOCK_SIZE); + for (; input_idx < num_iters; input_idx += OPT_BLOCK_SIZE) + { + BLOCK_TYPE_OPT vec_tmp = BLOCK_READ_OPT(input, aligned_data_offset + input_idx * get_sub_group_size()); + unroll_for (int j = 0; j < OPT_BLOCK_SIZE; j++) + { + my_chunk[input_idx+j] = vec_tmp[j]; + } + } + + for (; input_idx < items_num; input_idx++) + { + BLOCK_TYPE vec_tmp = BLOCK_READ(input, aligned_data_offset + input_idx * get_sub_group_size()); + my_chunk[input_idx] = vec_tmp; + } + } +#else if (workers_per_data_set > SUB_GROUP_SIZE) { for (; input_idx SUB_GROUP_SIZE) + { + const uint num_iters = items_num - (items_num % OPT_BLOCK_SIZE); + for (; output_idx < num_iters; output_idx += OPT_BLOCK_SIZE) + { + BLOCK_TYPE_OPT vec_tmp; + unroll_for (int j = 0; j < OPT_BLOCK_SIZE; j++) + { + ACTIVATION_TYPE dequantized = my_chunk[output_idx + j] / my_sum; + FUSED_OPS_MAIN; + vec_tmp[j] = FUSED_OPS_RESULT_MAIN; + } + BLOCK_WRITE_OPT(output, aligned_data_offset + output_idx * get_sub_group_size(), vec_tmp); + } + + for (; output_idx SUB_GROUP_SIZE) { for (; output_idx < items_num - (items_num % SUBGROUP_BLOCK_SIZE); output_idx+=SUBGROUP_BLOCK_SIZE) @@ -180,6 +242,7 @@ KERNEL (softmax_gpu_continuous_bfyx)( BLOCK_WRITE(output, aligned_data_offset + output_idx * get_sub_group_size(), vec_tmp); } } +#endif for (; output_idx < items_num; 
output_idx++) { ACTIVATION_TYPE dequantized = my_chunk[output_idx] / my_sum; @@ -200,6 +263,26 @@ KERNEL (softmax_gpu_continuous_bfyx)( FUSED_OPS_LEFTOVERS; output[leftover_idx] = FUSED_OPS_RESULT_LEFTOVERS; } +#else +#if IS_DYNAMIC + if (workers_per_data_set > SUB_GROUP_SIZE) + { + const uint num_iters = items_num - (items_num % OPT_BLOCK_SIZE); + for (; output_idx < num_iters; output_idx += OPT_BLOCK_SIZE) + { + BLOCK_TYPE_OPT vec_tmp; + unroll_for (int j = 0; j < OPT_BLOCK_SIZE; j++) + vec_tmp[j] = ACTIVATION(my_chunk[output_idx + j] / my_sum, ACTIVATION_PARAMS); + BLOCK_WRITE_OPT(output, aligned_data_offset + output_idx * get_sub_group_size(), vec_tmp); + } + + for (; output_idx < items_num; output_idx++) + { + BLOCK_TYPE vec_tmp; + vec_tmp = ACTIVATION(my_chunk[output_idx] / my_sum, ACTIVATION_PARAMS); + BLOCK_WRITE(output, aligned_data_offset + output_idx * get_sub_group_size(), vec_tmp); + } + } #else if (workers_per_data_set > SUB_GROUP_SIZE) { @@ -215,7 +298,7 @@ KERNEL (softmax_gpu_continuous_bfyx)( BLOCK_WRITE(output, aligned_data_offset + output_idx * get_sub_group_size(), vec_tmp); } } - +#endif for (; output_idx < items_num; output_idx++) { output[aligned_data_offset + get_sub_group_local_id() + output_idx * get_sub_group_size()] = ACTIVATION(my_chunk[output_idx] / my_sum, ACTIVATION_PARAMS); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp index 089f1799b1ec7b..da1328ca77083c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/gemm/gemm_kernel_tiled_opt.cpp @@ -109,7 +109,8 @@ GemmKernelTiledOpt::GemmTuningData GemmKernelTiledOpt::SetTuningParams(const gem bool output_ndim_transposed = (params.output_order.size() > 0 && (params.output_order.back() != (static_cast(params.output_order.size()) - 1))); if ((params.transpose_input0 == 0 /*X_LAST*/) && (params.transpose_input1 == 0 /*X_LAST*/ || params.transpose_input1 == 1 /*Y_LAST*/) && (!params.indirect_input0 && !params.inputs[0].has_dynamic_pad()) - && (!output_ndim_transposed || params.fused_ops.empty())) { + && (!output_ndim_transposed || params.fused_ops.empty()) + && !params.engineInfo.supports_immad) { // - Not supports transposed input0 / transposed input1 for OTHER mode yet // - If output X dim (= N) is transposed, cannot read eltwise as aligned data tuning_data.tile_n_size = 32; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_update_kernel_ref.cpp index 59c70777f3d599..fb6ef268fc14db 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_update_kernel_ref.cpp @@ -245,7 +245,16 @@ JitConstants ScatterUpdateKernelRef::GetJitConstants(const scatter_update_params std::vector pitches; const auto& output = params.outputs[0]; if (output.is_dynamic()) { - pitches = GetDynamicPitches(output.GetDims(), params.inputs.size() + GetFusedPrimitiveInputsCount(params)); + size_t tensor_idx = params.inputs.size() + GetFusedPrimitiveInputsCount(params); + for (auto input : params.inputs) { + if (!input.is_dynamic()) + tensor_idx--; + } + for (auto fused_op : params.fused_ops) { + if (!fused_op.output_tensor.is_dynamic()) + tensor_idx--; + } + pitches = 
GetDynamicPitches(output.GetDims(), tensor_idx);
     } else {
         pitches = GetPlanarPitches(output.GetDims());
     }
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp
index 90e2cba631cb95..e1d3945d596114 100644
--- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp
@@ -1683,6 +1683,88 @@ TEST(scatter_update_gpu_fp32, dynamic) {
     }
 }
 
+TEST(scatter_update_gpu_fp32, mixed_input_with_dynamic_static) {
+    // Dictionary : 1x2x5x2
+    // Indexes : 2x1x2x1
+    // Updates : 1x2x2x1x2x2
+    // Axis : 2
+    // Output : 1x2x5x2
+    // Input values in fp32
+
+    auto& engine = get_test_engine();
+
+    auto input1_layout = layout{ ov::PartialShape::dynamic(4), data_types::f32, format::bfyx };
+    auto input2_layout = layout{ ov::PartialShape{2, 1, 2, 1}, data_types::f32, format::bfyx };
+    auto input3_layout = layout{ ov::PartialShape::dynamic(6), data_types::f32, format::bfyx };
+
+    auto input1 = engine.allocate_memory({{1, 2, 5, 2}, data_types::f32, format::bfyx});         // Dictionary
+    auto input2 = engine.allocate_memory({{2, 1, 2, 1}, data_types::f32, format::bfyx});         // Indices
+    auto input3 = engine.allocate_memory({{1, 2, 2, 1, 2, 2}, data_types::f32, format::bfwzyx}); // Updates
+    auto axis = 2;
+
+    set_values(input1, {
+        0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f,
+        10.f, 11.f, 12.f, 13.f, 14.f, 15.f, 16.f, 17.f, 18.f, 19.f
+    });
+
+    set_values(input2, {
+        2.f, 0.f,
+        3.f, 4.f
+    });
+
+    set_values(input3, {
+        20.f, 30.f,
+        40.f, 50.f,
+        60.f, 70.f,
+        80.f, 90.f,
+        100.f, 110.f,
+        120.f, 130.f,
+        140.f, 150.f,
+        160.f, 170.f
+    });
+
+    topology topology;
+    topology.add(input_layout("InputDictionary", input1_layout));
+    topology.add(input_layout("InputText", input2_layout));
+    topology.add(input_layout("InputUpdates", input3_layout));
+
+    topology.add(reorder("DictionaryReordered", input_info("InputDictionary"), format::bfyx, data_types::f32));
+    topology.add(reorder("TextReordered", input_info("InputText"), format::bfyx, data_types::f32));
+    topology.add(scatter_update("scatter_update",
+                                input_info("DictionaryReordered"),
+                                input_info("TextReordered"),
+                                input_info("InputUpdates"),
+                                axis)
+    );
+    topology.add(reorder("out", input_info("scatter_update"), format::bfyx, data_types::f32));
+
+    ExecutionConfig config;
+    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+    network network(engine, topology, config);
+
+    network.set_input_data("InputDictionary", input1);
+    network.set_input_data("InputText", input2);
+    network.set_input_data("InputUpdates", input3);
+
+    auto inst = network.get_primitive("scatter_update");
+    auto impl = inst->get_impl();
+    ASSERT_TRUE(impl != nullptr);
+    ASSERT_TRUE(impl->is_dynamic());
+
+    auto outputs = network.execute();
+    auto output = outputs.at("out").get_memory();
+    cldnn::mem_lock<float> output_ptr(output, get_test_stream());
+
+    std::vector<float> expected_results = {
+        40.f, 50.f, 2.f, 3.f, 20.f, 30.f, 60.f, 70.f, 80.f, 90.f,
+        120.f, 130.f, 12.f, 13.f, 100.f, 110.f, 140.f, 150.f, 160.f, 170.f
+    };
+
+    for (size_t i = 0; i < expected_results.size(); ++i) {
+        ASSERT_EQ(expected_results[i], output_ptr[i]);
+    }
+}
+
 TEST(scatter_update_cpu_impl_fp32, dynamic) {
     // Dictionary : 1x2x5x2
     // Indexes : 2x1x2x1
diff --git a/src/plugins/template/tests/functional/op_reference/cum_sum.cpp b/src/plugins/template/tests/functional/op_reference/cum_sum.cpp
index dbb6b9e1dad0ef..13c23ebf0bf3ab 100644
--- a/src/plugins/template/tests/functional/op_reference/cum_sum.cpp
+++ b/src/plugins/template/tests/functional/op_reference/cum_sum.cpp
@@ -15,7 +15,7 @@ namespace {
 struct CumSumParams {
     // Custom axis input and attributes
     template <typename T, typename AT>
-    CumSumParams(const PartialShape& shape,
+    CumSumParams(const Shape& shape,
                  const element::Type& iType,
                  const std::vector<T>& iValues,
                  const std::vector<T>& oValues,
@@ -23,7 +23,7 @@ struct CumSumParams {
                  const bool reverse,
                  const element::Type& axisType,
                  AT axisVal,
-                 const PartialShape& axisShape)
+                 const Shape& axisShape)
         : execlusive(execlusive),
           reverse(reverse),
           axisValue(axisVal),
@@ -33,13 +33,13 @@ struct CumSumParams {
           inType(iType),
           outType(iType),
           axisData(CreateTensor(axisType, std::vector<AT>{axisVal})),
-          inputData(CreateTensor(iType, iValues)),
-          refData(CreateTensor(iType, oValues)),
+          inputData(CreateTensor(shape, iType, iValues)),
+          refData(CreateTensor(shape, iType, oValues)),
           testDefaults(false) {}
 
     // Default axis input and attributes
     template <typename T>
-    CumSumParams(const PartialShape& shape,
+    CumSumParams(const Shape& shape,
                  const element::Type& iType,
                  const std::vector<T>& iValues,
                  const std::vector<T>& oValues)
@@ -47,16 +47,16 @@ struct CumSumParams {
           axisType(element::i32),
           inType(iType),
           outType(iType),
-          inputData(CreateTensor(iType, iValues)),
-          refData(CreateTensor(iType, oValues)),
+          inputData(CreateTensor(shape, iType, iValues)),
+          refData(CreateTensor(shape, iType, oValues)),
           testDefaults(true) {}
 
     bool execlusive = false;
     bool reverse = false;
     int64_t axisValue = 0;
-    PartialShape axisShape;
-    PartialShape inShape;
+    Shape axisShape;
+    Shape inShape;
     element::Type axisType;
     element::Type inType;
     element::Type outType;
@@ -102,9 +102,9 @@ class ReferenceCumSumLayerTest : public testing::TestWithParam<CumSumParams>, pu
     }
 
 private:
-    static std::shared_ptr<Model> CreateFunction(const PartialShape& data_shape,
+    static std::shared_ptr<Model> CreateFunction(const Shape& data_shape,
                                                  const element::Type& data_type,
-                                                 const PartialShape& axis_shape,
+                                                 const Shape& axis_shape,
                                                  const element::Type& axis_type,
                                                  const bool execlusive,
                                                  const bool reverse) {
@@ -114,7 +114,7 @@ class ReferenceCumSumLayerTest : public testing::TestWithParam<CumSumParams>, pu
         return std::make_shared<Model>(NodeVector{cum_sum}, ParameterVector{data_param, axis_param});
     }
 
-    static std::shared_ptr<Model> CreateFunction(const PartialShape& data_shape, const element::Type& data_type) {
+    static std::shared_ptr<Model> CreateFunction(const Shape& data_shape, const element::Type& data_type) {
         const auto data_param = std::make_shared<op::v0::Parameter>(data_type, data_shape);
         const auto cum_sum = std::make_shared<op::v0::CumSum>(data_param);
         return std::make_shared<Model>(NodeVector{cum_sum}, ParameterVector{data_param});
@@ -130,14 +130,14 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
     using T = typename element_type_traits<IN_ET>::value_type;
     std::vector<CumSumParams> opParams{
         // Default axis input and attributes
-        CumSumParams(PartialShape{1}, type, std::vector<T>{3}, std::vector<T>{3}),
-        CumSumParams(PartialShape{6}, type, std::vector<T>{1, 2, 3, 4, 5, 6}, std::vector<T>{1, 3, 6, 10, 15, 21}),
-        CumSumParams(PartialShape{2, 4},
+        CumSumParams(Shape{1}, type, std::vector<T>{3}, std::vector<T>{3}),
+        CumSumParams(Shape{6}, type, std::vector<T>{1, 2, 3, 4, 5, 6}, std::vector<T>{1, 3, 6, 10, 15, 21}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{0, 1, 2, 3, 4, 6, 8, 10}),
         // Custom axis input and attributes
-        CumSumParams(PartialShape{6},
+        CumSumParams(Shape{6},
                      type,
                      std::vector<T>{1, 2, 3, 4, 5, 6},
                      std::vector<T>{1, 3, 6, 10, 15, 21},
@@ -145,8 +145,8 @@ std::vector<CumSumParams> 
generateCumSumParams(const element::Type& type) {
                      false,
                      element::i32,
                      int32_t(0),
-                     PartialShape{}),  // axis i32
-        CumSumParams(PartialShape{6},
+                     Shape{}),  // axis i32
+        CumSumParams(Shape{6},
                      type,
                      std::vector<T>{1, 2, 3, 4, 5, 6},
                      std::vector<T>{1, 3, 6, 10, 15, 21},
@@ -154,8 +154,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i64,
                      int64_t(0),
-                     PartialShape{}),  // axis i64
-        CumSumParams(PartialShape{6},
+                     Shape{}),  // axis i64
+        CumSumParams(Shape{6},
                      type,
                      std::vector<T>{1, 2, 3, 4, 5, 6},
                      std::vector<T>{21, 20, 18, 15, 11, 6},
@@ -163,8 +163,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i64,
                      int64_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{6},
+                     Shape{}),
+        CumSumParams(Shape{6},
                      type,
                      std::vector<T>{1, 2, 3, 4, 5, 6},
                      std::vector<T>{0, 1, 3, 6, 10, 15},
@@ -172,8 +172,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i64,
                      int64_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{6},
+                     Shape{}),
+        CumSumParams(Shape{6},
                      type,
                      std::vector<T>{1, 2, 3, 4, 5, 6},
                      std::vector<T>{20, 18, 15, 11, 6, 0},
@@ -181,9 +181,9 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i64,
                      int64_t(0),
-                     PartialShape{}),
+                     Shape{}),
 
-        CumSumParams(PartialShape{2, 4},
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{0, 1, 2, 3, 4, 6, 8, 10},
@@ -191,8 +191,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i32,
                      int32_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{4, 6, 8, 10, 4, 5, 6, 7},
@@ -200,8 +200,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i32,
                      int32_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{0, 0, 0, 0, 0, 1, 2, 3},
@@ -209,8 +209,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i32,
                      int32_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{4, 5, 6, 7, 0, 0, 0, 0},
@@ -218,8 +218,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i32,
                      int32_t(0),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{0, 1, 3, 6, 4, 9, 15, 22},
@@ -227,8 +227,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i32,
                      int32_t(1),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{0, 0, 1, 3, 0, 4, 9, 15},
@@ -236,8 +236,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      false,
                      element::i32,
                      int32_t(1),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{6, 6, 5, 3, 22, 18, 13, 7},
@@ -245,8 +245,8 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i32,
                      int32_t(1),
-                     PartialShape{}),
-        CumSumParams(PartialShape{2, 4},
+                     Shape{}),
+        CumSumParams(Shape{2, 4},
                      type,
                      std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7},
                      std::vector<T>{6, 5, 3, 0, 18, 13, 7, 0},
@@ -254,10 +254,10 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
                      true,
                      element::i32,
                      int32_t(1),
-                     PartialShape{}),
+                     Shape{}),
 
         CumSumParams(
-            PartialShape{3, 2, 4},
+            Shape{3, 2, 4},
             type,
             std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
             std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 42, 45},
@@ -265,9 +265,9 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
             false,
             element::i32,
             int32_t(0),
-            PartialShape{}),
+            Shape{}),
         CumSumParams(
-            PartialShape{3, 2, 4},
+            Shape{3, 2, 4},
             type,
             std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
             std::vector<T>{0, 1, 2, 3, 4, 6, 8, 10, 8, 9, 10, 11, 20, 22, 24, 26, 16, 17, 18, 19, 36, 38, 40, 42},
@@ -275,9 +275,9 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
             false,
             element::i32,
             int32_t(1),
-            PartialShape{}),
+            Shape{}),
         CumSumParams(
-            PartialShape{3, 2, 4},
+            Shape{3, 2, 4},
             type,
             std::vector<T>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
             std::vector<T>{0, 1, 3, 6, 4, 9, 15, 22, 8, 17, 27, 38, 12, 25, 39, 54, 16, 33, 51, 70, 20, 41, 63, 86},
@@ -285,7 +285,7 @@ std::vector<CumSumParams> generateCumSumParams(const element::Type& type) {
             false,
             element::i32,
             int32_t(2),
-            PartialShape{}),
+            Shape{}),
     };
     return opParams;
 }
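
For reference, the ``exclusive``/``reverse`` conventions encoded in the expected vectors of these
CumSum tests can be reproduced with a short NumPy check (an illustrative sketch only, not part of
the change; ``cumsum_ref`` is a name invented here):

.. code-block:: python

   import numpy as np

   def cumsum_ref(x, axis=0, exclusive=False, reverse=False):
       """Model of CumSum semantics: reverse flips the scan direction,
       exclusive drops the current element from each partial sum."""
       if reverse:
           x = np.flip(x, axis)
       out = np.cumsum(x, axis=axis)
       if exclusive:
           out = out - x  # element i keeps only the sum of the elements before it
       if reverse:
           out = np.flip(out, axis)
       return out

   a = np.array([1, 2, 3, 4, 5, 6])
   assert (cumsum_ref(a) == [1, 3, 6, 10, 15, 21]).all()
   assert (cumsum_ref(a, reverse=True) == [21, 20, 18, 15, 11, 6]).all()
   assert (cumsum_ref(a, exclusive=True) == [0, 1, 3, 6, 10, 15]).all()
   assert (cumsum_ref(a, exclusive=True, reverse=True) == [20, 18, 15, 11, 6, 0]).all()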