Address PR comments
skottmckay committed Jul 20, 2024
1 parent 9921187 commit d866225
Showing 4 changed files with 19 additions and 18 deletions.
@@ -106,7 +106,7 @@ bool ConvTransposeOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilder
// - output_shape CoreML output is inconsistent so disabled for now
//
// NOTE: need to test with/without the COREML_FLAG_USE_CPU_ONLY flag being set to get an idea of how flaky the CoreML
-//       behaviour is.
+//       behavior is.
// Update /onnxruntime/test/util/default_providers.cc:DefaultCoreMLExecutionProvider to do so

const auto& input_defs = node.InputDefs();
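For context on the NOTE in this hunk: COREML_FLAG_USE_CPU_ONLY pins CoreML to the CPU, which is how the flaky-vs-expected behavior was isolated. Below is a minimal sketch of the suggested toggle; it is an assumption about the wiring, not the actual contents of default_providers.cc, though COREML_FLAG_USE_CPU_ONLY and CoreMLProviderFactoryCreator are real onnxruntime names.

// Hypothetical sketch only -- not the actual default_providers.cc contents.
// Toggle COREML_FLAG_USE_CPU_ONLY and re-run the ConvTranspose tests to
// compare CPU-only output against the default all-devices output.
std::unique_ptr<IExecutionProvider> DefaultCoreMLExecutionProvider() {
#ifdef USE_COREML
  uint32_t coreml_flags = COREML_FLAG_USE_CPU_ONLY;  // or 0 for all devices
  return CoreMLProviderFactoryCreator::Create(coreml_flags)->CreateProvider();
#else
  return nullptr;
#endif
}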
@@ -154,13 +154,14 @@ bool ConvTransposeOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilder
LOGS(logger, VERBOSE) << "ConvTranspose: support for SAME_LOWER/SAME_UPPER is not implemented yet";
return false;
} else if (autopad == AutoPadType::NOTSET) {
-// CoreML output is inconsistent if pads are asymmetric.
-// CPU works. Other devices don't seem to (at least on macOS).
+// CoreML output is inconsistent between CPU_ONLY and ALL if the pads aren't all the same value.
+// CPU matches the expected output, but other devices don't seem to (at least on macOS).
auto onnx_pads = *helper.GetInt64s("pads"); // 'pads' are required if auto_pad is NOTSET
const auto pad_value = onnx_pads[0];
if (!std::all_of(onnx_pads.begin() + 1, onnx_pads.end(),
[pad_value](auto value) { return value == pad_value; })) {
LOGS(logger, VERBOSE) << "ConvTranspose: pads must be symmetric for CoreML to return consistent results";
LOGS(logger, VERBOSE) << "ConvTranspose: all pad values must be the same for CoreML to return "
"consistent results";
return false;
}
}
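To make the pads check in the hunk above concrete: ONNX 'pads' are laid out as [x1_begin, x2_begin, ..., x1_end, x2_end, ...], and the node is rejected unless every entry matches the first. A standalone restatement with example values (illustration, not repo code):

#include <algorithm>
#include <cstdint>
#include <vector>

// Standalone restatement of the check above (illustration, not repo code).
bool AllPadsEqual(const std::vector<int64_t>& onnx_pads) {
  const auto pad_value = onnx_pads[0];
  return std::all_of(onnx_pads.begin() + 1, onnx_pads.end(),
                     [pad_value](auto value) { return value == pad_value; });
}

// AllPadsEqual({1, 1, 1, 1}) -> true:  ConvTranspose stays on the CoreML EP.
// AllPadsEqual({1, 0, 1, 0}) -> false: the node falls back to another EP.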
@@ -193,7 +194,6 @@ bool ConvTransposeOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilder
// ONNX. Running without that flag produces the expected output. Madness...
auto output_shape = helper.GetInt64s("output_shape");
if (output_shape) {
-// there is an output_shape input, but the padding seems to be different so results don't
LOGS(logger, VERBOSE) << "ConvTranspose: output_shape is not supported as the CoreML output is inconsistent";
return false;
}
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/coreml/model/model.h
@@ -42,7 +42,7 @@ using GetOutputTensorMutableRawDataFn = std::function<void*(const std::string& n

#if defined(__APPLE__)
// helper function that we unit test
-Status GetMLMultiArrayCopyInfo(const MLMultiArray* array, int64_t* num_blocks, int64_t* block_size, int64_t* stride);
+Status GetMLMultiArrayCopyInfo(const MLMultiArray* array, int64_t& num_blocks, int64_t& block_size, int64_t& stride);
#endif

class Model {
23 changes: 12 additions & 11 deletions onnxruntime/core/providers/coreml/model/model.mm
@@ -419,7 +419,7 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
int64_t block_size = 0;
int64_t stride = 0;

-ORT_RETURN_IF_ERROR(GetMLMultiArrayCopyInfo(data, &num_blocks, &block_size, &stride));
+ORT_RETURN_IF_ERROR(GetMLMultiArrayCopyInfo(data, num_blocks, block_size, stride));

__block Status copy_status;
const auto* tensor_info = &output_tensor_info;
@@ -454,7 +454,8 @@ - (Status)predict:(const std::unordered_map<std::string, OnnxTensorData>&)inputs
namespace onnxruntime {
namespace coreml {

-Status GetMLMultiArrayCopyInfo(const MLMultiArray* array, int64_t* num_blocks, int64_t* block_size, int64_t* stride) {
+Status GetMLMultiArrayCopyInfo(const MLMultiArray* _Nonnull array,
+                               int64_t& num_blocks, int64_t& block_size, int64_t& stride) {
const auto* shape = array.shape;
const auto rank = shape.count;

@@ -466,31 +467,31 @@ Status GetMLMultiArrayCopyInfo(const MLMultiArray* array, int64_t* num_blocks, i
int64_t this_stride = [array.strides[rank - i] longLongValue];
if (this_stride != total_elems) {
// non-contiguous if we have to move more than batch_elems for each entry
-if (*block_size != 0) {
+if (block_size != 0) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL,
"Multiple non-contiguous dimensions in MLMultiArray are not supported.");
}

-*block_size = data_elems;
-*stride = this_stride;
+block_size = data_elems;
+stride = this_stride;
}

const auto elems_this_dim = [shape[rank - i] longLongValue];
data_elems *= elems_this_dim;
total_elems = elems_this_dim * this_stride;
}

-if (*block_size == 0) {
+if (block_size == 0) {
// contiguous
-*block_size = data_elems;
-*stride = array_total_elements;
+block_size = data_elems;
+stride = array_total_elements;
}

-*num_blocks = data_elems / *block_size;
+num_blocks = data_elems / block_size;

ORT_ENFORCE(array_total_elements == total_elems, "Logic error calculating copy info");
-ORT_ENFORCE(*stride >= *block_size, "Logic error calculating copy info");
-ORT_ENFORCE(*stride * *num_blocks == total_elems, "Logic error calculating copy info");
+ORT_ENFORCE(stride >= block_size, "Logic error calculating copy info");
+ORT_ENFORCE(stride * num_blocks == total_elems, "Logic error calculating copy info");

return Status::OK();
}
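As a reading aid for GetMLMultiArrayCopyInfo: the three outputs describe the copy as num_blocks contiguous runs of block_size elements whose starts sit stride elements apart in the (possibly padded) source. A hedged sketch of how a caller could consume them, plus two examples traced through the function above (the real copy in model.mm also dispatches on element type):

#include <cstdint>
#include <cstring>

// Illustration only: pack a strided float source into a dense destination
// using the (num_blocks, block_size, stride) triple computed above.
void CopyStrided(const float* src, float* dst,
                 int64_t num_blocks, int64_t block_size, int64_t stride) {
  for (int64_t i = 0; i < num_blocks; ++i) {
    std::memcpy(dst + i * block_size,    // dense destination
                src + i * stride,        // strided source
                block_size * sizeof(float));
  }
}

// Worked examples traced through GetMLMultiArrayCopyInfo:
//   shape [2, 4], strides [8, 1] (each row padded to 8 floats)
//     -> num_blocks = 2, block_size = 4, stride = 8  (one memcpy per row)
//   shape [2, 4], strides [4, 1] (fully contiguous)
//     -> num_blocks = 1, block_size = 8, stride = 8  (single memcpy)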
2 changes: 1 addition & 1 deletion onnxruntime/test/providers/coreml/utils_test.mm
@@ -19,7 +19,7 @@ auto ValidateGetInfo(MLMultiArray* array,
int64_t num_blocks = 0;
int64_t block_size = 0;
int64_t stride = 0;
-auto status = coreml::GetMLMultiArrayCopyInfo(array, &num_blocks, &block_size, &stride);
+auto status = coreml::GetMLMultiArrayCopyInfo(array, num_blocks, block_size, stride);

if (!expect_valid) {
ASSERT_STATUS_NOT_OK(status);
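The contiguous example above doubles as a test vector. A hypothetical extra case for this file, assuming ValidateGetInfo's truncated parameter list continues as (expected num_blocks, expected block_size, expected stride, expect_valid):

// Hypothetical test sketch -- ValidateGetInfo's full signature is truncated
// above, so the expected-value parameters here are assumed.
TEST(CoreMLUtils, GetMLMultiArrayCopyInfoContiguous) {
  NSError* error = nil;
  MLMultiArray* array = [[MLMultiArray alloc] initWithShape:@[ @2, @4 ]
                                                   dataType:MLMultiArrayDataTypeFloat32
                                                      error:&error];
  ASSERT_TRUE(error == nil);
  // a contiguous [2, 4] array is copied as one block of all 8 elements
  ValidateGetInfo(array, /*num_blocks*/ 1, /*block_size*/ 8, /*stride*/ 8,
                  /*expect_valid*/ true);
}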
