diff --git a/runtime/core/exec_aten/util/dim_order_util.h b/runtime/core/exec_aten/util/dim_order_util.h
index 4c5858fb5e9..926e87d3235 100644
--- a/runtime/core/exec_aten/util/dim_order_util.h
+++ b/runtime/core/exec_aten/util/dim_order_util.h
@@ -9,6 +9,8 @@
 #pragma once
 
 #include <cstdint>
+#include <cstdio>
+#include <cstring>
 
 #include <executorch/runtime/core/error.h>
 #include <executorch/runtime/platform/assert.h>
@@ -257,6 +259,33 @@ ET_NODISCARD inline Error stride_to_dim_order(
   }
   return Error::Ok;
 }
+
+/**
+ * Print a string representation of an ArrayRef of tensor sizes into a
+ * user-provided string buffer. If the user buffer is too small, the string
+ * will be truncated. The output is of the format (1,2,3,4).
+ *
+ * Note that we cannot use ArrayRef here due to a circular dependency (see
+ * above comments).
+ */
+template <typename SizesType>
+inline void sizes_to_string(char* output, size_t output_size, SizesType* sizes, size_t rank) {
+  auto remaining_size = output_size;
+  for (size_t i = 0; remaining_size > 0 && i < rank; i++) {
+    snprintf(
+        output,
+        remaining_size,
+        "%s%zd",
+        i == 0 ? "(" : ",",
+        static_cast<ssize_t>(sizes[i]));
+    auto len = strlen(output);
+    output += len;
+    remaining_size -= len;
+  }
+  snprintf(output, remaining_size, ")");
+}
+
+
 } // namespace runtime
 } // namespace executorch
diff --git a/runtime/core/portable_type/tensor_impl.cpp b/runtime/core/portable_type/tensor_impl.cpp
index 689a37cb5d8..f352a35fc0c 100644
--- a/runtime/core/portable_type/tensor_impl.cpp
+++ b/runtime/core/portable_type/tensor_impl.cpp
@@ -90,24 +90,39 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
   if (dim_ == 0) {
     return Error::Ok;
   }
+
+
   switch (shape_dynamism_) {
     case TensorShapeDynamism::STATIC:
-      ET_CHECK_OR_RETURN_ERROR(
-          std::equal(sizes_, sizes_ + dim_, new_sizes.begin()),
-          NotSupported,
-          "Attempted to resize a static tensor");
+      if (!std::equal(sizes_, sizes_ + dim_, new_sizes.begin())) {
+#ifdef ET_LOG_ENABLED
+        std::array<char, 64> old_sizes_str, new_sizes_str;
+
+        executorch::runtime::sizes_to_string(old_sizes_str.data(), old_sizes_str.size(), sizes().data(), sizes().size());
+        executorch::runtime::sizes_to_string(new_sizes_str.data(), new_sizes_str.size(), new_sizes.data(), new_sizes.size());
+#endif
+
+        ET_CHECK_OR_RETURN_ERROR(
+            false,
+            NotSupported,
+            "Attempted to resize a static tensor. Expected shape %s, but received %s.",
+            old_sizes_str.data(),
+            new_sizes_str.data());
+      }
+
       break;
     case TensorShapeDynamism::DYNAMIC_BOUND:
       // TODO(T175194371): Unbounded dynamic tensor resizing is not yet
       // supported: treat them as upper-bounded.
     case TensorShapeDynamism::DYNAMIC_UNBOUND: {
       const auto new_numel = compute_numel(new_sizes.data(), dim_);
+
       ET_CHECK_OR_RETURN_ERROR(
           new_numel <= numel_bound_,
           NotSupported,
-          "Attempted to resize a bounded tensor with capacity of %zu elements to %zu elements.",
-          new_numel,
-          numel_bound_);
+          "Attempted to resize a bounded tensor with a maximum capacity of %zu elements to %zu elements.",
+          numel_bound_,
+          new_numel);
 
       if (strides_ && dim_order_) {
         auto error =
diff --git a/runtime/executor/method.cpp b/runtime/executor/method.cpp
index eb0a8c8bb6a..90d4c71953b 100644
--- a/runtime/executor/method.cpp
+++ b/runtime/executor/method.cpp
@@ -1061,7 +1061,7 @@ Error Method::execute_instruction() {
       // We know that instr_args_as_KernelCall is non-null because it was
       // checked at init time.
       auto op_index = instruction->instr_args_as_KernelCall()->op_index();
-      auto op = serialization_plan_->operators()->Get(op_index);
+      ET_UNUSED auto op = serialization_plan_->operators()->Get(op_index);
       ET_LOG(
           Error,
           "KernelCall failed at instruction %zu:%zu in operator %s.%s: 0x%x",
diff --git a/runtime/kernel/operator_registry.cpp b/runtime/kernel/operator_registry.cpp
index 78aa0a51732..4633046c823 100644
--- a/runtime/kernel/operator_registry.cpp
+++ b/runtime/kernel/operator_registry.cpp
@@ -74,7 +74,7 @@ Error register_kernels_internal(const Span<const Kernel> kernels) {
     return Error::Internal;
  }
   // for debugging purpose
-  const char* lib_name = et_pal_get_shared_library_name(kernels.data());
+  ET_UNUSED const char* lib_name = et_pal_get_shared_library_name(kernels.data());
 
   for (const auto& kernel : kernels) {
     // Linear search. This is fine if the number of kernels is small.
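
A minimal usage sketch of the sizes_to_string helper introduced in dim_order_util.h above; the caller name, shape values, and buffer size here are illustrative only and are not part of the patch:

    #include <cstdint>

    #include <executorch/runtime/core/exec_aten/util/dim_order_util.h>

    // Hypothetical caller: format a rank-3 shape into a small stack buffer.
    inline void shape_logging_example() {
      int32_t sizes[] = {2, 3, 4};
      char buf[32];
      executorch::runtime::sizes_to_string(buf, sizeof(buf), sizes, 3);
      // buf now holds "(2,3,4)"; a buffer that is too small yields a
      // truncated string rather than an overflow.
    }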