Skip to content

Commit

Permalink
Merge branch 'github_actions/revert_pr_caches' of https://github.com/mryzhov/openvino into github_actions/revert_pr_caches
Browse files Browse the repository at this point in the history
  • Loading branch information
mryzhov committed Apr 5, 2024
2 parents a54ed4c + 72925d5 commit 0c21666
Show file tree
Hide file tree
Showing 21 changed files with 45 additions and 454 deletions.
3 changes: 0 additions & 3 deletions src/frontends/pytorch/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
#include "transformations/op_conversions/convert_convertlike.hpp"
#include "transformations/op_conversions/convert_convertpromotetypes.hpp"
#include "transformations/resolve_names_collisions.hpp"
#include "transforms.hpp"
#include "transforms/append_list_unpack_replacer.hpp"
#include "transforms/aten_cat_replacer.hpp"
#include "transforms/aten_getitem_replacer.hpp"
Expand Down Expand Up @@ -221,8 +220,6 @@ void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
manager.register_pass<ov::pass::ResolveNameCollisions>(true);
manager.run_passes(model);

apply_pytorch_conversion_transforms(model);

// Usually if nn.Module.forward is given as a source model for conversion, there is the first Parameter
// that represents original `self` argument in forward(self, ...). `self` shouldn't play any role in model
// inference if model is completely frozen and all methods are inlined. So we check if it doesn't have any
Expand Down
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/input_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,8 @@ void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) {
"Provided place is invalid, only inputs are supported for setting tensor value.");
auto pytorch_place = std::dynamic_pointer_cast<pytorch::Place>(place);
FRONT_END_GENERAL_CHECK(pytorch_place, "Only place produced by PyTorch Frontend is supported");
const auto el_type = pytorch_place->m_type;
const auto p_shape = pytorch_place->m_pshape;
const auto& el_type = pytorch_place->m_type;
const auto& p_shape = pytorch_place->m_pshape;
FRONT_END_GENERAL_CHECK(el_type.is_static() && p_shape.is_static(),
"Shape and type must be statically defined before calling set_tensor_value");
auto const_node = ov::op::v0::Constant::create(el_type, p_shape.to_shape(), value);
Expand Down
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/node_context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ OutputVector NodeContext::as_constant() const {
auto dtype = m_decoder->get_output_type(0);
if (dtype.is<type::Str>()) {
// Cannot represent string as Constant, creating FrameworkNode
auto str = m_decoder->as_string();
const auto& str = m_decoder->as_string();
auto fw_node = std::make_shared<PtFrameworkNode>(m_decoder, OutputVector{});
auto attrs = fw_node->get_attrs();
attrs["string_value"] = str;
Expand Down
10 changes: 5 additions & 5 deletions src/frontends/pytorch/src/op/full.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ OutputVector translate_full_like(const NodeContext& context) {
if (context.get_input_size() == 7 && !context.input_is_none(2)) {
return {base_translate_full_with_convert(context, sizes, value, 2)};
}
auto out = context.input_is_none(3) ? input : context.get_input(3);
const auto& out = context.input_is_none(3) ? input : context.get_input(3);
return {base_translate_full_with_convertlike(context, sizes, value, out)};
};

Expand All @@ -118,7 +118,7 @@ OutputVector translate_fill(const NodeContext& context) {
auto input = context.get_input(0);
auto value = context.get_input(1);
auto sizes = context.mark_node(std::make_shared<v3::ShapeOf>(input, element::i32));
auto out = context.input_is_none(2) ? input : context.get_input(2);
const auto& out = context.input_is_none(2) ? input : context.get_input(2);
auto result = base_translate_full_with_convertlike(context, sizes, value, out);
if (!context.input_is_none(2)) {
context.mutate_input(2, result);
Expand Down Expand Up @@ -189,7 +189,7 @@ OutputVector translate_zeros_like(const NodeContext& context) {
if (context.get_input_size() == 6 && !context.input_is_none(1)) {
return {base_translate_full_with_convert(context, sizes, value, 1)};
}
auto out = context.input_is_none(2) ? input : context.get_input(2);
const auto& out = context.input_is_none(2) ? input : context.get_input(2);
return {base_translate_full_with_convertlike(context, sizes, value, out)};
};

Expand Down Expand Up @@ -271,7 +271,7 @@ OutputVector translate_ones_like(const NodeContext& context) {
if (context.get_input_size() == 6 && !context.input_is_none(1)) {
return {base_translate_full_with_convert(context, sizes, value, 1)};
}
auto out = context.input_is_none(2) ? input : context.get_input(2);
const auto& out = context.input_is_none(2) ? input : context.get_input(2);
return {base_translate_full_with_convertlike(context, sizes, value, out)};
};

Expand Down Expand Up @@ -352,7 +352,7 @@ OutputVector translate_empty_like(const NodeContext& context) {
empty = base_translate_full_with_convertlike(context, sizes, value, input);
}
} else if (context.get_input_size() == 4) {
auto out = context.input_is_none(3) ? input : context.get_input(3);
const auto& out = context.input_is_none(3) ? input : context.get_input(3);
empty = base_translate_full_with_convertlike(context, sizes, value, out);
if (!context.input_is_none(3)) {
context.mutate_input(3, empty);
Expand Down
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/if.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,8 @@ OutputVector translate_if(const NodeContext& context) {
}
OutputVector res;
const auto num_outs = context.get_output_size();
const auto then_results = then_body->get_results();
const auto else_results = else_body->get_results();
const auto& then_results = then_body->get_results();
const auto& else_results = else_body->get_results();
PYTORCH_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs,
"Else or then body have less outputs than prim::If requires.");
for (size_t i = 0; i < num_outs; i++) {
Expand Down
14 changes: 7 additions & 7 deletions src/frontends/pytorch/src/op/lstm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg,
"Unexpected length of list with weights for rnn operation.");

const auto w_hh = all_weights[1];
const auto w_hh_pshape = w_hh.get_partial_shape();
const auto& w_hh_pshape = w_hh.get_partial_shape();
PYTORCH_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), "");
const auto hidden_size = w_hh_pshape[1].get_length();

Expand Down Expand Up @@ -151,8 +151,8 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg,
weight_ih = convert_data_format(rg, variant, all_weights[idx]);
weight_hh = convert_data_format(rg, variant, all_weights[idx + 1]);
if (has_biases) {
const auto bias_ih = all_weights[idx + 2];
const auto bias_hh = all_weights[idx + 3];
const auto& bias_ih = all_weights[idx + 2];
const auto& bias_hh = all_weights[idx + 3];
bias_concat = format_bias(rg, variant, bias_ih, bias_hh);
}
} else {
Expand All @@ -163,12 +163,12 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg,
if (has_biases) {
weight_ih_f = all_weights[2 * idx];
weight_hh_f = all_weights[2 * idx + 1];
const auto bias_ih_f = all_weights[2 * idx + 2];
const auto bias_hh_f = all_weights[2 * idx + 3];
const auto& bias_ih_f = all_weights[2 * idx + 2];
const auto& bias_hh_f = all_weights[2 * idx + 3];
weight_ih_b = all_weights[2 * idx + 4];
weight_hh_b = all_weights[2 * idx + 5];
const auto bias_ih_b = all_weights[2 * idx + 6];
const auto bias_hh_b = all_weights[2 * idx + 7];
const auto& bias_ih_b = all_weights[2 * idx + 6];
const auto& bias_hh_b = all_weights[2 * idx + 7];
const auto bias_f = format_bias(rg, variant, bias_ih_f, bias_hh_f);
const auto bias_b = format_bias(rg, variant, bias_ih_b, bias_hh_b);
bias_concat = rg.make<v0::Concat>(OutputVector{bias_f, bias_b}, 0);
Expand Down
7 changes: 4 additions & 3 deletions src/frontends/pytorch/src/op/max_poolnd.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ OutputVector translate_max_poolnd(const NodeContext& context) {
const auto one = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1}));
const auto two = context.mark_node(v0::Constant::create(element::i32, Shape{}, {2}));

const auto padding =
const auto& padding =
context.input_is_none(3)
? context.mark_node(std::make_shared<v0::Constant>(element::i32, Shape{pads.size()}, 0))->output(0)
: get_input_as_i32(context, 3);
Expand All @@ -84,7 +84,7 @@ OutputVector translate_max_poolnd(const NodeContext& context) {
const auto shape_of_mp = context.mark_node(std::make_shared<v3::ShapeOf>(mp, element::i32));
const auto gth_out_dims = context.mark_node(std::make_shared<v8::Gather>(shape_of_mp, dim_idxs, zero));
const auto out_sub_one = context.mark_node(std::make_shared<v1::Subtract>(gth_out_dims, one));
const auto stride_node = use_kernel ? context.get_input(1) : context.get_input(2);
const auto& stride_node = use_kernel ? context.get_input(1) : context.get_input(2);
const auto stride_node_i32 = context.mark_node(std::make_shared<v0::Convert>(stride_node, element::i32));
const auto out_mul_stride = context.mark_node(std::make_shared<v1::Multiply>(out_sub_one, stride_node_i32));

Expand All @@ -94,7 +94,8 @@ OutputVector translate_max_poolnd(const NodeContext& context) {

// apply padding on input clear pads attribute
const auto pb = context.mark_node(std::make_shared<v0::Concat>(OutputVector{pads_remaining, padding}, 0));
const auto pe = context.mark_node(std::make_shared<v0::Concat>(OutputVector{pads_remaining, selected_pads}, 0));
const auto& pe =
context.mark_node(std::make_shared<v0::Concat>(OutputVector{pads_remaining, selected_pads}, 0));
auto minus_inf =
context.mark_node(v0::Constant::create(element::f32, Shape{}, {-std::numeric_limits<float>::infinity()}));
minus_inf = context.mark_node(std::make_shared<v1::ConvertLike>(minus_inf, input));
Expand Down
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/roll.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ OutputVector translate_roll(const NodeContext& context) {
bool on_flattened = context.input_is_none(2);
if (!on_flattened) {
axes = context.get_input(2);
const auto shifts_pshape = shifts.get_partial_shape();
const auto axes_pshape = axes.get_partial_shape();
const auto& shifts_pshape = shifts.get_partial_shape();
const auto& axes_pshape = axes.get_partial_shape();
on_flattened = !axes_pshape.compatible(shifts_pshape);
}
if (on_flattened) {
Expand Down
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/scatter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Output<Node> prepare_source(const NodeContext& context,
const Output<Node>& src,
const Output<Node>& index,
const Output<Node>& input) {
auto src_partial_shape = src.get_partial_shape();
const auto& src_partial_shape = src.get_partial_shape();
auto index_shape_rank = get_shape_rank(context, index);
auto index_shape = std::get<0>(index_shape_rank);
auto index_rank = std::get<1>(index_shape_rank);
Expand Down
Loading

0 comments on commit 0c21666

Please sign in to comment.