From 62da6bf762951938fc37769dbd0fc5c4a3d1037c Mon Sep 17 00:00:00 2001
From: vraspar
Date: Tue, 16 Jul 2024 14:55:58 -0700
Subject: [PATCH 1/9] Add support for concat op

---
 .../coreml/builders/impl/builder_utils.cc     |  8 ++
 .../coreml/builders/impl/builder_utils.h      |  9 +++
 .../coreml/builders/impl/concat_op_builder.cc | 76 +++++++++++++------
 .../apple/coreml_supported_mlprogram_ops.md   |  1 +
 4 files changed, 72 insertions(+), 22 deletions(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
index 2fcf9a1d7d9b..c0f7e9fea83f 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
@@ -313,6 +313,14 @@ void AddOperationInput(MILSpec::Operation& op, std::string_view input_name, std:
   (*op.mutable_inputs())[input_name] = std::move(arg);
 }
 
+void AddOperationInputs(MILSpec::Operation& op, std::string_view input_name,
+                        const std::vector<std::string_view>& value_names) {
+  MILSpec::Argument& arg = (*op.mutable_inputs())[input_name];
+  for (const auto& value : value_names) {
+    arg.mutable_arguments()->Add()->set_name(std::string(value));
+  }
+}
+
 void AddOperationOutput(COREML_SPEC::MILSpec::Operation& op, const NodeArg& output) {
   auto& outputs = *op.mutable_outputs();
   auto& output_arg = *outputs.Add();
diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
index 97fb83b6dc48..a0afea4ac61c 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
@@ -129,6 +129,15 @@ COREML_SPEC::MILSpec::NamedValueType CreateNamedTensorValueType(const NodeArg& n
 void AddOperationInput(COREML_SPEC::MILSpec::Operation& op,
                        std::string_view input_name, std::string_view value_name);
 
+/// <summary>
+/// Add a variadic input argument to a MILSpec::Operation
+/// </summary>
+/// <param name="op">Operation to update.</param>
+/// <param name="input_name">The input name defined by the spec for the operation.</param>
+/// <param name="value_names">The
+void AddOperationInputs(COREML_SPEC::MILSpec::Operation& op, std::string_view input_name,
+                        const std::vector<std::string_view>& value_names);
+
 /// <summary>
 /// Add an output to a MILSpec::Operation. Name, data type and shape are used from the NodeArg.
 /// </summary>
diff --git a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
index 34193318a026..c7a8748c0bf0 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
@@ -18,27 +18,52 @@ class ConcatOpBuilder : public BaseOpBuilder {
 
   bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
                          const logging::Logger& logger) const override;
+
+  bool SupportsMLProgram() const override { return true; }
 };
 
 Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                               const Node& node,
                                               const logging::Logger& logger) const {
-  std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
-
-  layer->mutable_concat()->set_sequenceconcat(false);
-
-  for (const auto* input : node.InputDefs()) {
-    LOGS(logger, VERBOSE) << "input name " << input->Name();
-    *layer->mutable_input()->Add() = input->Name();
+#if defined(COREML_ENABLE_PROGRAM)
+  if (model_builder.CreateMLProgram()) {
+    using namespace CoreML::Specification::MILSpec;
+
+    NodeAttrHelper helper(node);
+    const auto axis = helper.GetInt64("axis");  // required
+    const auto interleave = false;
+
+    std::unique_ptr<Operation> op = model_builder.CreateOperation(node, "concat");
+    std::vector<std::string_view> input_names;
+    for (const auto* input : node.InputDefs()) {
+      input_names.emplace_back(input->Name());
+    }
+    AddOperationInputs(*op, "values", input_names);
+    AddOperationInput(*op, "axis", model_builder.AddConstant(op->type(), "axis", *axis));
+    AddOperationInput(*op, "interleave", model_builder.AddConstant(op->type(), "interleave", *interleave);
+    AddOperationOutput(*op, *node.OutputDefs()[0]);
+    model_builder.AddOperation(std::move(op));
+
+  } else
+#endif  // defined(COREML_ENABLE_MLPROGRAM)
+  {
+    std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
+
+    layer->mutable_concat()->set_sequenceconcat(false);
+
+    for (const auto* input : node.InputDefs()) {
+      LOGS(logger, VERBOSE) << "input name " << input->Name();
+      *layer->mutable_input()->Add() = input->Name();
+    }
+
+    *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
+
+    model_builder.AddLayer(std::move(layer));
   }
-
-  *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
-
-  model_builder.AddLayer(std::move(layer));
   return Status::OK();
 }
 
-bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& /* input_params */,
+bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
                                         const logging::Logger& logger) const {
   const auto& input_defs = node.InputDefs();
   if (input_defs.size() < 2) {
@@ -51,16 +76,6 @@ bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPa
     return false;
 
   auto rank = input_shape.size();
-  if (rank != 4) {
-    // For some reason, the concat in CoreML running on 3d tensor will concat on wrong axis
-    // Instead of concat on axis 0, it will concat on axis 1
-    // Disable Concat support for 3d tensor for now
-    // TODO, add ExpandDims and Squeeze, 3d -ExpandDims-> 4d -> Concat -Squeeze-> 3d
-    LOGS(logger, VERBOSE) << "Concat only support 4d shape for now, input is "
-                          << rank << "d shape";
-    return false;
-  }
-
   NodeAttrHelper helper(node);
   auto axis = static_cast<size_t>(HandleNegativeAxis(helper.Get("axis", 1), rank));
   if (rank != axis + 3) {
@@ -69,6 +84,23 @@ bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPa
     return false;
   }
 
+#if defined(COREML_ENABLE_MLPROGRAM)
+  if (input_params.create_mlprogram) {
+    return true;
+  } else
+#endif  // (COREML_ENABLE_MLPROGRAM)
+  {
+    if (rank != 4) {
+      // For some reason, the concat in CoreML running on 3d tensor will concat on wrong axis
+      // Instead of concat on axis 0, it will concat on axis 1
+      // Disable Concat support for 3d tensor for now
+      // TODO, add ExpandDims and Squeeze, 3d -ExpandDims-> 4d -> Concat -Squeeze-> 3d
+      LOGS(logger, VERBOSE) << "Concat only support 4d shape for now, input is "
+                            << rank << "d shape";
+      return false;
+    }
+  }
+
   return true;
 }
 
diff --git a/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md b/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
index 9961d2668e6f..286eb320d233 100644
--- a/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
+++ b/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
@@ -20,3 +20,4 @@ Keep in sync with doco generated from /docs/execution-providers/CoreML-Execution
 |ai.onnx:Sub||
 |ai.onnx:Sigmoid||
 |ai:onnx:Tanh||
+|ai:onnx:Concat||

From 6cfbc507202f140793f3dbea4957a1471be1166d Mon Sep 17 00:00:00 2001
From: vraspar
Date: Wed, 17 Jul 2024 15:25:06 -0700
Subject: [PATCH 2/9] do not check for rank 4 with ml program fix lint issues

---
 .../core/providers/coreml/builders/impl/concat_op_builder.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
index c7a8748c0bf0..4c05d7ffeb8c 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
@@ -36,6 +36,7 @@ Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
     std::unique_ptr<Operation> op = model_builder.CreateOperation(node, "concat");
     std::vector<std::string_view> input_names;
     for (const auto* input : node.InputDefs()) {
+      LOGS(logger, VERBOSE) << "input name " << input->Name();
       input_names.emplace_back(input->Name());
     }
     AddOperationInputs(*op, "values", input_names);

From b6763d1deec67c05bb09c82f52c2737115d09cfe Mon Sep 17 00:00:00 2001
From: vraspar
Date: Fri, 19 Jul 2024 15:15:14 -0700
Subject: [PATCH 3/9] fix issues with concat ops

---
 .../coreml/builders/impl/concat_op_builder.cc |  32 ++-
 .../core/providers/coreml/model/model.mm      | 231 +++++++++++++++---
 2 files changed, 211 insertions(+), 52 deletions(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
index 4c05d7ffeb8c..76ec84ee3460 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
@@ -4,6 +4,7 @@
 #include "core/providers/common.h"
 #include "core/providers/coreml/builders/helper.h"
 #include "core/providers/coreml/builders/impl/base_op_builder.h"
+#include "core/providers/coreml/builders/impl/builder_utils.h"
 #include "core/providers/coreml/builders/model_builder.h"
 #include "core/providers/coreml/builders/op_builder_factory.h"
 #include "core/providers/coreml/shape_utils.h"
@@ -25,7 +26,7 @@ class ConcatOpBuilder : public BaseOpBuilder {
 Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                               const Node& node,
                                               const logging::Logger& logger) const {
-#if defined(COREML_ENABLE_PROGRAM)
+#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
@@ -40,8 +41,8 @@ Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
       input_names.emplace_back(input->Name());
     }
     AddOperationInputs(*op, "values", input_names);
-    AddOperationInput(*op, "axis", model_builder.AddConstant(op->type(), "axis", *axis));
-    AddOperationInput(*op, "interleave", model_builder.AddConstant(op->type(), "interleave", *interleave);
+    AddOperationInput(*op, "axis", model_builder.AddScalarConstant(op->type(), "axis", *axis));
+    AddOperationInput(*op, "interleave", model_builder.AddScalarConstant(op->type(), "interleave", interleave));
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
 
@@ -76,21 +77,8 @@ bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPa
   if (!GetShape(*input_defs[0], input_shape, logger))
     return false;
 
-  auto rank = input_shape.size();
-  NodeAttrHelper helper(node);
-  auto axis = static_cast<size_t>(HandleNegativeAxis(helper.Get("axis", 1), rank));
-  if (rank != axis + 3) {
-    LOGS(logger, VERBOSE) << "Concat only support axis to be -3, actual axis: " << axis
-                          << ", actual rank: " << rank;
-    return false;
-  }
-
-#if defined(COREML_ENABLE_MLPROGRAM)
-  if (input_params.create_mlprogram) {
-    return true;
-  } else
-#endif  // (COREML_ENABLE_MLPROGRAM)
-  {
+  if (!input_params.create_mlprogram) {
+    auto rank = input_shape.size();
     if (rank != 4) {
       // For some reason, the concat in CoreML running on 3d tensor will concat on wrong axis
       // Instead of concat on axis 0, it will concat on axis 1
@@ -100,6 +88,14 @@ bool ConcatOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPa
                             << rank << "d shape";
       return false;
     }
+
+    NodeAttrHelper helper(node);
+    auto axis = static_cast<size_t>(HandleNegativeAxis(helper.Get("axis", 1), rank));
+    if (rank != axis + 3) {
+      LOGS(logger, VERBOSE) << "Concat only support axis to be -3, actual axis: " << axis
+                            << ", actual rank: " << rank;
+      return false;
+    }
   }
 
   return true;
diff --git a/onnxruntime/core/providers/coreml/model/model.mm b/onnxruntime/core/providers/coreml/model/model.mm
index 1d506099b436..a6df472385d4 100644
--- a/onnxruntime/core/providers/coreml/model/model.mm
+++ b/onnxruntime/core/providers/coreml/model/model.mm
@@ -26,6 +26,13 @@
 #include "core/providers/coreml/model/objc_str_utils.h"
 #include "core/providers/coreml/shape_utils.h"
 
+// manually enable to test logic for handling non-contiguous MLMultiArray as we don't have a unit test setup
+// that can hit that.
+// #define TEST_MLMULTIARRAY_HANDLING
+#ifdef TEST_MLMULTIARRAY_HANDLING
+#include <cassert>
+#endif
+
 // force the linker to create a dependency on the CoreML framework so that in MAUI usage we don't need
 // to manually do this
 asm(".linker_option \"-framework\", \"CoreML\"");
@@ -174,51 +181,197 @@ Status CreateInputFeatureProvider(const std::unordered_map
+  ORT_ENFORCE(*stride >= *block_size, "Logic error calculating copy info");
+  ORT_ENFORCE(*stride * *num_blocks == total_elems, "Logic error calculating copy info");
+
+  return Status::OK();
+}
+
+#ifdef TEST_MLMULTIARRAY_HANDLING
+void ValidateGetInfo(MLMultiArray* array,
+                     int64_t expected_num_blocks, int64_t expected_block_size, int64_t expected_stride, bool valid) {
+  int64_t num_blocks = 0;
+  int64_t block_size = 0;
+  int64_t stride = 0;
+  auto status = GetMLMultiArrayCopyInfo(array, &num_blocks, &block_size, &stride);
+
+  if (!valid) {
+    assert(!status.IsOK());
+    return;
+  }
+
+  assert(status.IsOK());
+  assert(num_blocks == expected_num_blocks);
+  assert(block_size == expected_block_size);
+  assert(stride == expected_stride);
+}
+
+void ValidateMLMultiArrayHandling() {
+  void* data = reinterpret_cast<void*>(0xfeedf00d);
+
+  // dim -1 with stride
+  {
+    NSArray<NSNumber*>* shape = @[ @1, @1, @8, @8 ];
+    NSArray<NSNumber*>* strides = @[ @128, @128, @16, @2 ];
+
+    auto* array = [[MLMultiArray alloc] initWithDataPointer:data
+                                                      shape:shape
+                                                   dataType:MLMultiArrayDataTypeInt32
+                                                    strides:strides
+                                                deallocator:^(void* /* bytes */) {
+                                                }
+                                                      error:nil];
+    ValidateGetInfo(array, 64, 1, 2, true);
+  }
+
+  // dim -2 with stride
+  {
+    NSArray<NSNumber*>* shape = @[ @1, @1, @8, @8 ];
+    NSArray<NSNumber*>* strides = @[ @128, @128, @16, @1 ];
+
+    auto* array = [[MLMultiArray alloc] initWithDataPointer:data
+                                                      shape:shape
+                                                   dataType:MLMultiArrayDataTypeInt32
+                                                    strides:strides
+                                                deallocator:^(void* /* bytes */) {
+                                                }
+                                                      error:nil];
+    ValidateGetInfo(array, 8, 8, 16, true);
+  }
+
+  // dim -3 with stride
+  {
+    NSArray<NSNumber*>* shape = @[ @1, @2, @4, @4 ];
+    NSArray<NSNumber*>* strides = @[ @48, @24, @4, @1 ];
+
+    auto* array = [[MLMultiArray alloc] initWithDataPointer:data
+                                                      shape:shape
+                                                   dataType:MLMultiArrayDataTypeInt32
+                                                    strides:strides
+                                                deallocator:^(void* /* bytes */) {
+                                                }
+                                                      error:nil];
+
+    ValidateGetInfo(array, 2, 16, 24, true);
+  }
+
+  // two non-contiguous dims
+  {
+    // dim
+    NSArray<NSNumber*>* shape = @[ @1, @2, @4, @4 ];
+    NSArray<NSNumber*>* strides = @[ @96, @48, @8, @1 ];
+
+    auto* array = [[MLMultiArray alloc] initWithDataPointer:data
+                                                      shape:shape
+                                                   dataType:MLMultiArrayDataTypeInt32
+                                                    strides:strides
+                                                deallocator:^(void* /* bytes */) {
+                                                }
+                                                      error:nil];
+
+    ValidateGetInfo(array, 0, 0, 0, false);
+  }
+}
+#endif
 
 Status CopyMLMultiArrayBuffer(const void* mlmultiarray_buffer, void* tensor_buffer,
-                              const MLMultiArray* array_info,
-                              const OnnxTensorInfo* tensor_info,
-                              const std::optional<size_t> mlmultiarray_buffer_size) {
+                              const MLMultiArray* array,
+                              const int64_t num_blocks, const int64_t block_size, const int64_t stride,
+                              const OnnxTensorInfo* tensor_info) {
   if (mlmultiarray_buffer == nullptr) {
     return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "mlmultiarray_buffer has no data");
   }
 
-  const size_t num_elements = array_info.count;
+  // total including non-contiguous space
+
+  int64_t array_total_elements = [array.strides[0] longLongValue] * [array.shape[0] longLongValue];
+  const int64_t num_elements = array.count;
+
+  ORT_RETURN_IF(array_total_elements != num_blocks * stride ||
+                    num_elements != num_blocks * block_size,
+                "MLMultiArray size does not match the copy info");
+
   const auto onnx_data_type = tensor_info->data_type;
   switch (onnx_data_type) {
     case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: {
-      const auto output_data_byte_size = num_elements * sizeof(float);
-      ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == output_data_byte_size,
-                        "CoreML output buffer size and expected output size differ");
-      memcpy(tensor_buffer, mlmultiarray_buffer, output_data_byte_size);
+      const auto* src_buffer = static_cast<const float*>(mlmultiarray_buffer);
+      auto* dst_buffer = static_cast<float*>(tensor_buffer);
+      const auto block_byte_size = block_size * sizeof(float);
+
+      for (int64_t idx = 0; idx < num_blocks; ++idx) {
+        memcpy(dst_buffer, src_buffer, block_byte_size);
+        src_buffer += stride;
+        dst_buffer += block_size;
+      }
       break;
     }
     case ONNX_NAMESPACE::TensorProto_DataType_INT32: {
-      const auto output_data_byte_size = num_elements * sizeof(int32_t);
-      ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == output_data_byte_size,
-                        "CoreML output buffer size and expected output size differ");
-      memcpy(tensor_buffer, mlmultiarray_buffer, output_data_byte_size);
+      const auto* src_buffer = static_cast<const int32_t*>(mlmultiarray_buffer);
+      auto* dst_buffer = static_cast<int32_t*>(tensor_buffer);
+      const auto block_byte_size = block_size * sizeof(int32_t);
+
+      for (int64_t idx = 0; idx < num_blocks; ++idx) {
+        memcpy(dst_buffer, src_buffer, block_byte_size);
+        src_buffer += stride;
+        dst_buffer += block_size;
+      }
+
       break;
     }
     // For this case, since Coreml Spec only uses int32 for model output while onnx provides
    // int64 for model output data type. We are doing a type casting (int32 -> int64) here
     // when copying the model to ORT
     case ONNX_NAMESPACE::TensorProto_DataType_INT64: {
-      ORT_RETURN_IF_NOT(array_info.dataType == MLMultiArrayDataTypeInt32,
-                        "CoreML output data type is not MLMultiArrayDataTypeInt32");
-      ORT_RETURN_IF_NOT(!mlmultiarray_buffer_size || mlmultiarray_buffer_size == num_elements * sizeof(int32_t),
-                        "CoreML output buffer size and expected output size differ");
-      const auto model_output_span = gsl::span{static_cast<const int32_t*>(mlmultiarray_buffer), num_elements};
-      const auto output_span = gsl::span{static_cast<int64_t*>(tensor_buffer), num_elements};
-      std::transform(model_output_span.begin(), model_output_span.end(), output_span.begin(),
-                     [](int32_t v) { return static_cast<int64_t>(v); });
+      ORT_RETURN_IF(array.dataType != MLMultiArrayDataTypeInt32,
+                    "CoreML output data type is not MLMultiArrayDataTypeInt32");
+
+      const int32_t* src_buffer = static_cast<const int32_t*>(mlmultiarray_buffer);
+      int64_t* dst_buffer = static_cast<int64_t*>(tensor_buffer);
+
+      for (int64_t idx = 0; idx < num_blocks; ++idx) {
+        auto input_span = gsl::span{src_buffer, static_cast<size_t>(block_size)};
+        auto output_span = gsl::span{dst_buffer, static_cast<size_t>(block_size)};
+        std::transform(input_span.begin(), input_span.end(), output_span.begin(),
+                       [](int32_t v) { return static_cast<int64_t>(v); });
+
+        src_buffer += stride;
+        dst_buffer += block_size;
+      }
      break;
     }
     default:
@@ -250,8 +403,7 @@ - (void)dealloc;
 - (Status)loadModel API_AVAILABLE_COREML3;
 - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
           outputs:(const std::unordered_map<std::string, OnnxTensorInfo>&)outputs
-    getOutputTensorDataFn:(const GetOutputTensorMutableRawDataFn&)
-                              get_output_tensor_mutable_raw_data_fn
+    getOutputTensorDataFn:(const GetOutputTensorMutableRawDataFn&)get_output_tensor_mutable_raw_data_fn
         API_AVAILABLE_COREML3;
 
 @property(nullable) MLModel* model API_AVAILABLE_COREML3;
@@ -397,21 +549,27 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
                            ") do not match");
         }
 
-        ORT_RETURN_IF_NOT(IsArrayContiguous(data),
-                          "Non-contiguous output MLMultiArray is not currently supported");
+        // support a non-contiguous array, provided only one dimension is not contiguous
+        int64_t num_blocks = 0;
+        int64_t block_size = 0;
+        int64_t stride = 0;
+
+        ORT_RETURN_IF_ERROR(GetMLMultiArrayCopyInfo(data, &num_blocks, &block_size, &stride));
+
         __block Status copy_status;
         const auto* tensor_info = &output_tensor_info;
         // `getBytesWithHandler` replaces deprecated `.dataPointer` on new versions
         if (@available(macOS 12.3, iOS 15.4, *)) {
           [data getBytesWithHandler:^(const void* bytes, NSInteger size) {
-            copy_status = CopyMLMultiArrayBuffer(bytes, output_buffer, data, tensor_info, size);
+            copy_status = CopyMLMultiArrayBuffer(bytes, output_buffer, data,
+                                                 num_blocks, block_size, stride, tensor_info);
           }];
         } else {
-          // disable size check as old API does not return buffer length
-          copy_status = CopyMLMultiArrayBuffer(data.dataPointer, output_buffer, data, tensor_info, std::nullopt);
+          copy_status = CopyMLMultiArrayBuffer(data.dataPointer, output_buffer, data,
+                                               num_blocks, block_size, stride, tensor_info);
         }
-        if (!copy_status.IsOK())
-          return copy_status;
+
+        ORT_RETURN_IF_ERROR(copy_status);
       }
     }
   }
@@ -508,6 +666,11 @@ Status Predict(const std::unordered_map<std::string, OnnxTensorData>& inputs,
 Model::~Model() {}
 
 Status Model::LoadModel() {
+  // arbitrary place to run this when manually enabled for temporary testing
+#ifdef TEST_MLMULTIARRAY_HANDLING
+  ValidateMLMultiArrayHandling();
+#endif
+
   return execution_->LoadModel();
 }

From a4500ca1c5a366536b53bc92fd606000c8c37db5 Mon Sep 17 00:00:00 2001
From: vraspar
Date: Mon, 22 Jul 2024 10:39:09 -0700
Subject: [PATCH 4/9] Update onnxruntime/core/providers/coreml/builders/impl/builder_utils.h

Co-authored-by: Scott McKay
---
 .../core/providers/coreml/builders/impl/builder_utils.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
index a0afea4ac61c..7387b712c997 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
+++ b/onnxruntime/core/providers/coreml/builders/impl/builder_utils.h
@@ -132,9 +132,9 @@ void AddOperationInput(COREML_SPEC::MILSpec::Operation& op,
 /// <summary>
 /// Add a variadic input argument to a MILSpec::Operation
 /// </summary>
-/// <param name="op">Operation to update.</param>
-/// <param name="input_name">The input name defined by the spec for the operation.</param>
-/// <param name="value_names">The
+/// <param name="op">Operation to update.</param>
+/// <param name="input_name">The input name defined by the spec for the operation.</param>
+/// <param name="value_names">The input value names.</param>
 void AddOperationInputs(COREML_SPEC::MILSpec::Operation& op, std::string_view input_name,
                         const std::vector<std::string_view>& value_names);
 

From 7018916f0453884ba85c3b3c6907f94665be55e0 Mon Sep 17 00:00:00 2001
From: vraspar
Date: Mon, 22 Jul 2024 10:39:20 -0700
Subject: [PATCH 5/9] Update onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc

Co-authored-by: Scott McKay
---
 .../core/providers/coreml/builders/impl/concat_op_builder.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
index 76ec84ee3460..850ce2fabdef 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
@@ -37,7 +37,6 @@ Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
     std::unique_ptr<Operation> op = model_builder.CreateOperation(node, "concat");
     std::vector<std::string_view> input_names;
     for (const auto* input : node.InputDefs()) {
-      LOGS(logger, VERBOSE) << "input name " << input->Name();
       input_names.emplace_back(input->Name());
     }
     AddOperationInputs(*op, "values", input_names);

From a7dae2a0a5ca8382e8d67f175793b956c9718a09 Mon Sep 17 00:00:00 2001
From: vraspar
Date: Mon, 22 Jul 2024 10:39:28 -0700
Subject: [PATCH 6/9] Update onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc

Co-authored-by: Scott McKay
---
 .../core/providers/coreml/builders/impl/concat_op_builder.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
index 850ce2fabdef..551d8222cc06 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/concat_op_builder.cc
@@ -28,7 +28,7 @@ Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                               const logging::Logger& logger) const {
 #if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
-    using namespace CoreML::Specification::MILSpec;
+    using namespace CoreML::Specification::MILSpec;  // NOLINT
     NodeAttrHelper helper(node);
     const auto axis = helper.GetInt64("axis");  // required
     const auto interleave = false;

From fdf2a7928852b3ca96825be1bca6d00f7b114029 Mon Sep 17 00:00:00 2001
From: vraspar
Date: Mon, 22 Jul 2024 10:43:37 -0700
Subject: [PATCH 7/9] update coreml supported op docs

---
 tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md b/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
index 286eb320d233..b595ef189677 100644
--- a/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
+++ b/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
@@ -6,6 +6,7 @@ Keep in sync with doco generated from /docs/execution-providers/CoreML-Execution
 |ai.onnx:Add||
 |ai.onnx:AveragePool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
 |ai.onnx:Clip||
+|ai:onnx:Concat||
 |ai.onnx:Conv|Only 1D/2D Conv is supported.<br/>Bias if provided must be constant.|