From f3cbe76059da8f53a02f8f6ac585589dab3a68dc Mon Sep 17 00:00:00 2001
From: Adam Reeve
Date: Fri, 20 Sep 2024 13:04:10 +1200
Subject: [PATCH] Fix memory access violations in the CPU float16 min and max
 operators (#22135)

### Description

Fixes the logic for computing the number of elements in the input and
output spans in the `MinMaxMLFloat16` method. It incorrectly used the
total number of elements in the output rather than the number of elements
in the current span, which happened to work for 1D inputs but breaks for
2D inputs. As `BroadcastLooper` iterated over spans, `MinMaxMLFloat16`
therefore started each iteration progressively further forward in the
input and output buffers, reading and writing correspondingly far past
their ends, causing the ASan error in #21558 and sometimes segfaults in
larger examples.

### Motivation and Context

Fixes #21558. Further testing showed that this issue doesn't only cause
ASan errors in tests; it also causes segfaults with larger input sizes.
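To make the failure mode concrete, here is a minimal standalone sketch (not
onnxruntime's actual `BroadcastLooper`/`BroadcastHelper` code; the loop
structure and the `BroadcastMin` name are simplified assumptions) of a column
vector broadcast against a matrix, processed one row span at a time:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for the broadcast span loop: input0 is a {rows, 1}
// column vector broadcast against input1 of shape {rows, cols}, handled one
// row (span) at a time.
void BroadcastMin(const std::vector<float>& input0,  // shape {rows, 1}
                  const std::vector<float>& input1,  // shape {rows, cols}
                  std::vector<float>& output,        // shape {rows, cols}
                  std::size_t rows, std::size_t cols) {
  for (std::size_t row = 0; row < rows; ++row) {
    // Correct: the current span covers `cols` elements of this row. The bug
    // was equivalent to using `rows * cols` (the total output element count)
    // here, so every span after the first read and wrote past the end of
    // input1 and output.
    const std::size_t num_elements = cols;
    for (std::size_t i = 0; i < num_elements; ++i) {
      output[row * cols + i] = std::min(input0[row], input1[row * cols + i]);
    }
  }
}
```

With 1D inputs there is only a single span, so the span size and the total
output size coincide and the bug stays hidden; with 2D inputs the two diverge
and each subsequent span overruns the buffers.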
---
 .../providers/cpu/math/element_wise_ops.cc    |  6 +-
 .../cpu/math/element_wise_ops_test.cc         | 98 +++++++++++++++++++
 2 files changed, 101 insertions(+), 3 deletions(-)

diff --git a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
index 5ea6000da1cb..91717486b77c 100644
--- a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
+++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
@@ -748,7 +748,7 @@ static Status MinMaxMLFloat16(const OpKernel& inst, OpKernelContext* context) {
 
   ProcessBroadcastSpanFuncs funcs{
       [](BroadcastHelper& per_iter_bh) {
-        auto num_elements = per_iter_bh.NumOutputElements();
+        auto num_elements = per_iter_bh.EigenInput1().rows();
 
         const auto* input_1 = reinterpret_cast<const Eigen::half*>(per_iter_bh.EigenInput1().data());
         ConstEigenVectorArrayMap<Eigen::half> input_1_vec_map(input_1, num_elements);
@@ -763,7 +763,7 @@ static Status MinMaxMLFloat16(const OpKernel& inst, OpKernelContext* context) {
         }
       },
       [](BroadcastHelper& per_iter_bh) {
-        auto num_elements = per_iter_bh.NumOutputElements();
+        auto num_elements = per_iter_bh.EigenInput0().rows();
 
         const auto* input_0 = reinterpret_cast<const Eigen::half*>(per_iter_bh.EigenInput0().data());
         ConstEigenVectorArrayMap<Eigen::half> input_0_vec_map(input_0, num_elements);
@@ -778,7 +778,7 @@ static Status MinMaxMLFloat16(const OpKernel& inst, OpKernelContext* context) {
         }
       },
       [](BroadcastHelper& per_iter_bh) {
-        auto num_elements = per_iter_bh.NumOutputElements();
+        auto num_elements = per_iter_bh.EigenInput0().rows();
 
         const auto* input_0 = reinterpret_cast<const Eigen::half*>(per_iter_bh.EigenInput0().data());
         ConstEigenVectorArrayMap<Eigen::half> input_0_vec_map(input_0, num_elements);
diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index bd3d21d4929f..eb914646942f 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -1787,6 +1787,54 @@ TEST(MathOpTest, Min_12_MLFloat16_Scalar1) {
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }
 
+TEST(MathOpTest, Min_12_MLFloat16_MatrixVector) {
+  OpTester test("Min", 12);
+  test.AddInput<MLFloat16>("data_0", {3, 3},
+                           MakeMLFloat16({1.0f, 1.0f, 1.0f,
+                                          -0.5f, 0.0f, -2.0f,
+                                          0.5f, 0.0f, 2.0f}));
+  test.AddInput<MLFloat16>("data_1", {3, 1},
+                           MakeMLFloat16({0.0f, -1.0f, 1.0f}));
+  test.AddOutput<MLFloat16>("min", {3, 3},
+                            MakeMLFloat16({0.0f, 0.0f, 0.0f,
+                                           -1.0f, -1.0f, -2.0f,
+                                           0.5f, 0.0f, 1.0f}));
+  if (nullptr != DefaultCpuExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCpuExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+  if (nullptr != DefaultCudaExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCudaExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+}
+
+TEST(MathOpTest, Min_12_MLFloat16_VectorMatrix) {
+  OpTester test("Min", 12);
+  test.AddInput<MLFloat16>("data_0", {3, 1},
+                           MakeMLFloat16({0.0f, -1.0f, 1.0f}));
+  test.AddInput<MLFloat16>("data_1", {3, 4},
+                           MakeMLFloat16({1.0f, 1.0f, 1.0f, -1.0f,
+                                          -0.5f, 0.0f, -2.0f, -1.25f,
+                                          0.5f, 0.0f, 2.0f, 1.5f}));
+  test.AddOutput<MLFloat16>("min", {3, 4},
+                            MakeMLFloat16({0.0f, 0.0f, 0.0f, -1.0f,
+                                           -1.0f, -1.0f, -2.0f, -1.25f,
+                                           0.5f, 0.0f, 1.0f, 1.0f}));
+  if (nullptr != DefaultCpuExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCpuExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+  if (nullptr != DefaultCudaExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCudaExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+}
+
 TEST(MathOpTest, Max_6) {
   OpTester test("Max", 6);
   std::vector<int64_t> dims{3, 3};
@@ -2137,6 +2185,56 @@ TEST(MathOpTest, Max_12_MLFloat16_Scalar1) {
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  // TensorRT: Input batch size is inconsistent
 }
 
+TEST(MathOpTest, Max_12_MLFloat16_MatrixVector) {
+  OpTester test("Max", 12);
+  test.AddInput<MLFloat16>("data_0", {4, 3},
+                           MakeMLFloat16({1.0f, 1.0f, 1.0f,
+                                          -0.5f, 0.0f, -2.0f,
+                                          0.0f, 0.5f, 0.75f,
+                                          0.5f, 0.0f, 2.0f}));
+  test.AddInput<MLFloat16>("data_1", {4, 1},
+                           MakeMLFloat16({0.0f, -1.0f, 0.5f, 1.0f}));
+  test.AddOutput<MLFloat16>("max", {4, 3},
+                            MakeMLFloat16({1.0f, 1.0f, 1.0f,
+                                           -0.5f, 0.0f, -1.0f,
+                                           0.5f, 0.5f, 0.75f,
+                                           1.0f, 1.0f, 2.0f}));
+  if (nullptr != DefaultCpuExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCpuExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+  if (nullptr != DefaultCudaExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCudaExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+}
+
+TEST(MathOpTest, Max_12_MLFloat16_VectorMatrix) {
+  OpTester test("Max", 12);
+  test.AddInput<MLFloat16>("data_0", {3, 1},
+                           MakeMLFloat16({0.0f, -1.0f, 1.0f}));
+  test.AddInput<MLFloat16>("data_1", {3, 3},
+                           MakeMLFloat16({1.0f, 1.0f, 1.0f,
+                                          -0.5f, 0.0f, -2.0f,
+                                          0.5f, 0.0f, 2.0f}));
+  test.AddOutput<MLFloat16>("max", {3, 3},
+                            MakeMLFloat16({1.0f, 1.0f, 1.0f,
+                                           -0.5f, 0.0f, -1.0f,
+                                           1.0f, 1.0f, 2.0f}));
+  if (nullptr != DefaultCpuExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCpuExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+  if (nullptr != DefaultCudaExecutionProvider()) {
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.push_back(DefaultCudaExecutionProvider());
+    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  }
+}
+
 TEST(MathOpTest, Not) {
   OpTester test("Not");
   std::vector<int64_t> dims{2};