From 0dccf6427b342a41de6f90d91c2352afee1c479f Mon Sep 17 00:00:00 2001
From: wejoncy
Date: Mon, 30 Sep 2024 00:07:20 -0700
Subject: [PATCH] address review comments

---
 .../coreml/builders/impl/slice_op_builder.cc  |  8 ++---
 .../cpu/nn/conv_transpose_op_test.cc          | 29 ++++++++++---------
 .../providers/cpu/tensor/concat_op_test.cc    |  2 +-
 .../providers/cpu/tensor/slice_op.test.cc     |  2 +-
 .../providers/cpu/tensor/split_op_test.cc     |  2 +-
 .../providers/cpu/tensor/transpose_test.cc    |  2 +-
 6 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/onnxruntime/core/providers/coreml/builders/impl/slice_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/slice_op_builder.cc
index f795feb886b38..cb0f7cc0208e9 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/slice_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/slice_op_builder.cc
@@ -224,10 +224,10 @@ bool SliceOpBuilder::HasSupportedInputsImpl(const Node& node,
 
 #ifdef COREML_ENABLE_MLPROGRAM
   // The [Doc](https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_transformation.slice_by_index)
-  // says slice_by_index is support fp16 by ML Program. It's something wrong and it requires coreml version >= 7 otherwise
-  // only float is supported.
-  // refs 1:https://github.com/apple/coremltools/blob/89d058ffdcb0b39a03031782d8a448b6889ac425/coremltools/converters/mil/mil/ops/defs/tensor_transformation.py#L515
-  // refs 2:https://github.com/apple/coremltools/blob/c3ea4cf56fef1176417246c1b85363417f3e713d/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py#L495
+  // says ML Program slice_by_index supports fp16 in CoreML 5 (iOS 15).
+  // This is incorrect; CoreML 6+ (iOS 16, CoreML spec version >= 7) is required, otherwise only float is supported.
+  // CoreML 5: https://github.com/apple/coremltools/blob/89d058ffdcb0b39a03031782d8a448b6889ac425/coremltools/converters/mil/mil/ops/defs/tensor_transformation.py#L515
+  // CoreML 6: https://github.com/apple/coremltools/blob/c3ea4cf56fef1176417246c1b85363417f3e713d/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py#L495
   if (input_params.create_mlprogram && input_params.coreml_version >= 7 &&
       input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
   } else
diff --git a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
index 99d814a0e2a15..29525f89ef544 100644
--- a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
+++ b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
@@ -123,6 +123,17 @@ TEST(ConvTransposeTest, ConvTranspose_1D) {
   TestConvTransposeOp(attrs, {X, W}, {X_shape, W_shape}, expected_vals, Y_shape);
 }
 
+template <typename T>
+static std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
+  if constexpr (std::is_same<T, float>::value) {
+    return inputs;
+  } else {
+    std::vector<T> inputs_fp16(inputs.size());
+    ConvertFloatToMLFloat16(inputs.data(), inputs_fp16.data(), inputs.size());
+    return inputs_fp16;
+  }
+}
+
 TYPED_TEST(ConvTransposeTest, ConvTranspose_2D_outputpadding_strides2) {
   ConvTransposeOpAttributes attrs = {
       vector<int64_t>{3, 3},  // kernel_shape
@@ -201,20 +212,11 @@ TYPED_TEST(ConvTransposeTest, ConvTranspose_2D_C2) {
                            0.58575004f, 1.25774109f, 1.23472511f, 0.77670550f,
                            0.25844323f, 0.88953220f, 0.77098041f, 0.27468451f};
 
-  if constexpr (std::is_same<TypeParam, float>::value) {
-    TestConvTransposeOp(attrs, {X, W}, {X_shape, W_shape}, expected_vals, Y_shape);
-  } else {
-    vector<MLFloat16> X_fp16(X.size());
-    ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
-    vector<MLFloat16> W_fp16(W.size());
-    ConvertFloatToMLFloat16(W.data(), W_fp16.data(), W.size());
-    std::vector<MLFloat16> expected_vals_fp16(expected_vals.size());
-    ConvertFloatToMLFloat16(expected_vals.data(), expected_vals_fp16.data(), expected_vals.size());
-    TestConvTransposeOp(attrs, {X_fp16, W_fp16}, {X_shape, W_shape}, expected_vals_fp16, Y_shape);
-  }
+  TestConvTransposeOp(attrs, {GetTypedArray<TypeParam>(X), GetTypedArray<TypeParam>(W)},
+                      {X_shape, W_shape}, GetTypedArray<TypeParam>(expected_vals), Y_shape);
 }
 
-TEST(ConvTransposeTest, ConvTranspose_2D_Bias_1) {
+TYPED_TEST(ConvTransposeTest, ConvTranspose_2D_Bias_1) {
   ConvTransposeOpAttributes attrs = {
       vector<int64_t>{3, 3},  // kernel_shape
       vector<int64_t>{0, 0},  // output_padding
@@ -243,7 +245,8 @@ TEST(ConvTransposeTest, ConvTranspose_2D_Bias_1) {
                            0.07770107f, -0.09561026f, 0.13388641f, 0.30945939f, 0.14015588f,
                            0.13079405f, -0.00488365f, -0.06758944f, 0.45621645f, 0.01566098f,
                            0.00703105f, 0.12956856f, 0.0103332f, 0.04221053f, -0.21318194f};
-  TestConvTransposeOp(attrs, {X, W, B}, {X_shape, W_shape, B_shape}, expected_vals, Y_shape);
+  TestConvTransposeOp(attrs, {GetTypedArray<TypeParam>(X), GetTypedArray<TypeParam>(W), GetTypedArray<TypeParam>(B)},
+                      {X_shape, W_shape, B_shape}, GetTypedArray<TypeParam>(expected_vals), Y_shape);
 }
 
 TEST(ConvTransposeTest, ConvTranspose_2D_Bias_2) {
diff --git a/onnxruntime/test/providers/cpu/tensor/concat_op_test.cc b/onnxruntime/test/providers/cpu/tensor/concat_op_test.cc
index 48d6ad081ac19..4a1888a5ca7d6 100644
--- a/onnxruntime/test/providers/cpu/tensor/concat_op_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/concat_op_test.cc
@@ -76,7 +76,7 @@ TEST(ConcatOpTest, Concat1D_2) {
 }
 
 template <typename T>
-std::vector<T> GetTypedArray(std::vector<float> inputs, T v = T(0.f)) {
+static std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
   if constexpr (std::is_same<T, float>::value) {
     return inputs;
   } else {
diff --git a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
index 0285ec01d5869..a32d43f296250 100644
--- a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
@@ -264,7 +264,7 @@ TEST(SliceTest, Slice3D) {
 }
 
 template <typename T>
-static std::vector<T> GetTypedArray(std::vector<float> inputs, T v = T(0.f)) {
+static std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
   std::vector<T> inputs_T(inputs.size());
   if constexpr (std::is_same<T, float>::value) {
     return inputs;
diff --git a/onnxruntime/test/providers/cpu/tensor/split_op_test.cc b/onnxruntime/test/providers/cpu/tensor/split_op_test.cc
index 35424fbdc90cf..48872404f08bd 100644
--- a/onnxruntime/test/providers/cpu/tensor/split_op_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/split_op_test.cc
@@ -179,7 +179,7 @@ TEST(SplitOperatorTest, Axis0UnequalSplitFloat) {
 }
 
 template <typename T>
-std::vector<T> GetTypedArray(std::vector<float> inputs, T v = T(0.f)) {
+std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
   if constexpr (std::is_same<T, float>::value) {
     return inputs;
   } else {
diff --git a/onnxruntime/test/providers/cpu/tensor/transpose_test.cc b/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
index 28ee044df3f6a..3b46dc3f5d6a2 100644
--- a/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/transpose_test.cc
@@ -70,7 +70,7 @@ void TransposeTest(const std::vector<int64_t>& input_shape,
 }
 
 template <typename T>
-std::vector<T> GetTypedArray(std::vector<float> inputs, T v = T(0.f)) {
+std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
   if constexpr (std::is_same<T, float>::value) {
     return inputs;
   } else {
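
Note (editorial, not part of the patch): the sketch below shows the GetTypedArray<T> pattern the patch reuses across the test files in one self-contained piece. Float16 and FloatToFloat16 are hypothetical stand-ins for ONNX Runtime's MLFloat16 and ConvertFloatToMLFloat16, which are not reproduced here; only the pass-through-or-convert structure is taken from the patch.

#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <vector>

// Hypothetical stand-in for onnxruntime::MLFloat16; the real type stores an
// IEEE half-precision value, this placeholder only models the shape.
struct Float16 {
  uint16_t bits = 0;
  Float16() = default;
  explicit Float16(float /*f*/) {}  // the real conversion packs f into 16 bits
};

// Hypothetical stand-in for ConvertFloatToMLFloat16: element-wise conversion
// of a float buffer into half-precision storage.
inline void FloatToFloat16(const float* src, Float16* dst, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) dst[i] = Float16(src[i]);
}

// Same structure as the GetTypedArray<T> helper in the test files above:
// float data passes through unchanged, any other element type is converted.
template <typename T>
std::vector<T> GetTypedArray(std::vector<float> inputs, [[maybe_unused]] T v = T(0.f)) {
  if constexpr (std::is_same<T, float>::value) {
    return inputs;
  } else {
    std::vector<T> converted(inputs.size());
    FloatToFloat16(inputs.data(), converted.data(), inputs.size());
    return converted;
  }
}

In the typed tests, the helper is invoked as GetTypedArray<TypeParam>(X), so the same float literals drive both the float and MLFloat16 instantiations of each test.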