From 17f0c3f1e6c620af4474a74ff4a0aa2a9813bff6 Mon Sep 17 00:00:00 2001
From: jpvillam
Date: Wed, 19 Oct 2022 18:14:44 +0000
Subject: [PATCH] Added unsigned identifier for size compare

---
 onnxruntime/core/providers/rocm/rocm_pch.h         |  8 ++++----
 .../test/contrib_ops/bias_dropout_op_test.cc       | 10 +++++-----
 .../contrib_ops/bitmask_dropout_op_test.cc         |  2 +-
 .../test/framework/execution_provider_test.cc      |  4 ++--
 .../test/framework/inference_session_test.cc       |  2 +-
 onnxruntime/test/framework/random_test.cc          | 20 +++++++++----------
 .../test/framework/session_state_test.cc           |  3 +--
 .../providers/cpu/generator/random_test.cc         |  4 ++--
 8 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/onnxruntime/core/providers/rocm/rocm_pch.h b/onnxruntime/core/providers/rocm/rocm_pch.h
index 59df0aaa23498..f89f71684ffc0 100644
--- a/onnxruntime/core/providers/rocm/rocm_pch.h
+++ b/onnxruntime/core/providers/rocm/rocm_pch.h
@@ -9,14 +9,14 @@
 #endif
 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
-#include 
+#include 
 
 #ifdef ORT_USE_NCCL
-#include 
+#include 
 #endif
 
 #if defined(_MSC_VER)
diff --git a/onnxruntime/test/contrib_ops/bias_dropout_op_test.cc b/onnxruntime/test/contrib_ops/bias_dropout_op_test.cc
index 2f1c8b54b20cd..b069aff884c2e 100644
--- a/onnxruntime/test/contrib_ops/bias_dropout_op_test.cc
+++ b/onnxruntime/test/contrib_ops/bias_dropout_op_test.cc
@@ -136,14 +136,14 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector& input_s
   }
 
   auto output_verifier = [&](const std::vector& fetches, const std::string& provider_type) {
-    ASSERT_GE(fetches.size(), 1);
+    ASSERT_GE(fetches.size(), 1u);
     const auto& output_tensor = FetchTensor(fetches[0]);
     auto output_span = output_tensor.DataAsSpan();
     const auto num_dropped_values = std::count(output_span.begin(), output_span.end(), residual_value);
     if (ratio == 1.0f) {
-      ASSERT_EQ(num_dropped_values, static_cast(output_span.size())) << "provider: " << provider_type;
+      ASSERT_EQ(static_cast(num_dropped_values), static_cast(output_span.size())) << "provider: " << provider_type;
     } else {
       ASSERT_NEAR(static_cast(num_dropped_values) / static_cast(output_span.size()),
                   training_mode == TrainingTrue ? ratio : 0.0f, 0.1f)
@@ -159,7 +159,7 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector& input_s
     }
 
     if (use_mask) {
-      ASSERT_GE(fetches.size(), 2);
+      ASSERT_GE(fetches.size(), 2u);
       const auto& mask_tensor = FetchTensor(fetches[1]);
       auto mask_span = mask_tensor.DataAsSpan();
       ASSERT_EQ(mask_span.size(), output_span.size()) << "provider: " << provider_type;
@@ -186,11 +186,11 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector& input_s
   t.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &t_eps);
 
   std::vector dropout_outputs = t.GetFetches();
-  ASSERT_GE(dropout_outputs.size(), 1);
+  ASSERT_GE(dropout_outputs.size(), 1u);
   const float* output_values = FetchTensor(dropout_outputs[0]).Data();
   t_bitmask.AddOutput("output", input_shape, output_values, input_size);
   if (use_mask) {
-    ASSERT_GE(dropout_outputs.size(), 2);
+    ASSERT_GE(dropout_outputs.size(), 2u);
     const bool* mask_values = FetchTensor(dropout_outputs[1]).Data();
     std::vector bitmask_values = MasksToBitmasks(input_size, mask_values);
     t_bitmask.AddOutput("mask", {static_cast(bitmask_values.size())}, bitmask_values);
diff --git a/onnxruntime/test/contrib_ops/bitmask_dropout_op_test.cc b/onnxruntime/test/contrib_ops/bitmask_dropout_op_test.cc
index 8dd88cb67f012..587b16398e99d 100644
--- a/onnxruntime/test/contrib_ops/bitmask_dropout_op_test.cc
+++ b/onnxruntime/test/contrib_ops/bitmask_dropout_op_test.cc
@@ -129,7 +129,7 @@ void RunTestForTraining(const std::vector& input_dims) {
   dropout.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &dropout_eps);
 
   std::vector dropout_outputs = dropout.GetFetches();
-  ASSERT_EQ(dropout_outputs.size(), 2);
+  ASSERT_EQ(dropout_outputs.size(), 2u);
   const T* output_values = FetchTensor(dropout_outputs[0]).Data();
   const bool* mask_values = FetchTensor(dropout_outputs[1]).Data();
   std::vector bitmask_values = MasksToBitmasks(input_size, mask_values);
diff --git a/onnxruntime/test/framework/execution_provider_test.cc b/onnxruntime/test/framework/execution_provider_test.cc
index f43f8e39b649d..5a7351a766fa3 100644
--- a/onnxruntime/test/framework/execution_provider_test.cc
+++ b/onnxruntime/test/framework/execution_provider_test.cc
@@ -39,7 +39,7 @@ TEST(ExecutionProviderTest, MetadefIdGeneratorUsingModelPath) {
   HashValue model_hash;
   int id = ep.GetId(viewer, model_hash);
   ASSERT_EQ(id, 0);
-  ASSERT_NE(model_hash, 0);
+  ASSERT_NE(model_hash, 0u);
 
   for (int i = 1; i < 4; ++i) {
     HashValue cur_model_hash;
@@ -70,7 +70,7 @@ TEST(ExecutionProviderTest, MetadefIdGeneratorUsingModelHashing) {
   HashValue model_hash;
   int id = ep.GetId(viewer, model_hash);
   ASSERT_EQ(id, 0);
-  ASSERT_NE(model_hash, 0);
+  ASSERT_NE(model_hash, 0u);
 
   // now load the model from bytes and check the hash differs
   std::ifstream model_file_stream(model_path, std::ios::in | std::ios::binary);
diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc
index 4ccf5298ebee0..5c07c624ecd6d 100644
--- a/onnxruntime/test/framework/inference_session_test.cc
+++ b/onnxruntime/test/framework/inference_session_test.cc
@@ -353,7 +353,7 @@ void RunModelWithBindingMatMul(InferenceSession& session_object,
 #if defined(USE_CUDA) || defined(USE_ROCM)
     // in this case we need to copy the tensor from cuda to cpu
     vector& outputs = io_binding->GetOutputs();
-    ASSERT_EQ(1, outputs.size());
+    ASSERT_EQ(1u, outputs.size());
     auto& rtensor = outputs.front().Get();
     auto element_type = rtensor.DataType();
     auto& shape = rtensor.Shape();
diff --git a/onnxruntime/test/framework/random_test.cc b/onnxruntime/test/framework/random_test.cc
index 960a4d83e3100..b685d95fbec1f 100644
--- a/onnxruntime/test/framework/random_test.cc
+++ b/onnxruntime/test/framework/random_test.cc
@@ -32,26 +32,26 @@ TEST(RandomTest, PhiloxGeneratorTest) {
   PhiloxGenerator generator(17);
 
   auto seeds = generator.NextPhiloxSeeds(1);
-  ASSERT_EQ(seeds.first, 17);
-  ASSERT_EQ(seeds.second, 0);
+  ASSERT_EQ(seeds.first, 17u);
+  ASSERT_EQ(seeds.second, 0u);
 
   seeds = generator.NextPhiloxSeeds(10);
-  ASSERT_EQ(seeds.first, 17);
-  ASSERT_EQ(seeds.second, 1);
+  ASSERT_EQ(seeds.first, 17u);
+  ASSERT_EQ(seeds.second, 1u);
 
   seeds = generator.NextPhiloxSeeds(0);
-  ASSERT_EQ(seeds.first, 17);
-  ASSERT_EQ(seeds.second, 11);
+  ASSERT_EQ(seeds.first, 17u);
+  ASSERT_EQ(seeds.second, 11u);
 
   seeds = generator.NextPhiloxSeeds(1);
-  ASSERT_EQ(seeds.first, 17);
-  ASSERT_EQ(seeds.second, 11);
+  ASSERT_EQ(seeds.first, 17u);
+  ASSERT_EQ(seeds.second, 11u);
 
   generator.SetSeed(17);
   seeds = generator.NextPhiloxSeeds(1);
-  ASSERT_EQ(seeds.first, 17);
-  ASSERT_EQ(seeds.second, 0);
+  ASSERT_EQ(seeds.first, 17u);
+  ASSERT_EQ(seeds.second, 0u);
 }
 
 }  // namespace test
diff --git a/onnxruntime/test/framework/session_state_test.cc b/onnxruntime/test/framework/session_state_test.cc
index de7f93ab7171d..430f91e0efcfc 100644
--- a/onnxruntime/test/framework/session_state_test.cc
+++ b/onnxruntime/test/framework/session_state_test.cc
@@ -100,7 +100,6 @@ TEST_P(SessionStateAddGetKernelTest, AddGetKernelTest) {
 
 INSTANTIATE_TEST_SUITE_P(SessionStateTests, SessionStateAddGetKernelTest, testing::Values(0, 1));
 
-namespace {
 class TestParam {
  public:
   int ir_version;
@@ -108,7 +107,7 @@ class TestParam {
   int thread_count;
 };
 TestParam param_list[] = {{3, true, 0}, {4, true, 0}, {3, false, 0}, {4, false, 0}, {3, true, 1}, {4, true, 1}, {3, false, 1}, {4, false, 1}};
-}  // namespace
+
 class SessionStateTestP : public testing::TestWithParam {};
 // Test that we separate out constant and non-constant initializers correctly
 TEST_P(SessionStateTestP, TestInitializerProcessing) {
diff --git a/onnxruntime/test/providers/cpu/generator/random_test.cc b/onnxruntime/test/providers/cpu/generator/random_test.cc
index a59e19f2f1cbd..e3101297b25f3 100644
--- a/onnxruntime/test/providers/cpu/generator/random_test.cc
+++ b/onnxruntime/test/providers/cpu/generator/random_test.cc
@@ -381,7 +381,7 @@ void RunRandomNormalGpuTest(const std::vector dims, const float mean, c
 
   auto output_verifier = [&](const std::vector& fetches, const std::string& provider_type) {
     // Only one output, and mean of output values are near attribute mean.
-    ASSERT_EQ(fetches.size(), 1);
+    ASSERT_EQ(fetches.size(), 1u);
     const auto& output_tensor = FetchTensor(fetches[0]);
     if (output_dtype == TensorProto_DataType::TensorProto_DataType_FLOAT) {
       auto output_span = output_tensor.DataAsSpan();
@@ -474,7 +474,7 @@ void RunRandomUniformGpuTest(const std::vector dims, const float low, c
   auto output_verifier = [&](const std::vector& fetches, const std::string& provider_type) {
     // Only one output. Each value in output tensoer is between low and high.
     // Mean of output values are near attribute mean of low and high.
-    ASSERT_EQ(fetches.size(), 1);
+    ASSERT_EQ(fetches.size(), 1u);
     const auto& output_tensor = FetchTensor(fetches[0]);
     if (output_dtype == TensorProto_DataType::TensorProto_DataType_FLOAT) {
       auto output_span = output_tensor.DataAsSpan();
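
Note on the pattern this patch applies: nearly every hunk touches a googletest assertion that compares a signed integer literal (or a signed std::count result) against an unsigned value such as std::vector::size(), a span size, or a HashValue. Mixing signed and unsigned operands inside ASSERT_EQ/ASSERT_GE/ASSERT_NE can raise -Wsign-compare, which becomes a build failure when warnings are treated as errors; appending the u suffix or casting the signed side keeps both operands unsigned. The listing below is a minimal standalone sketch of that idea, not code from ONNX Runtime; the file name, test names, and values are invented for illustration.

// sign_compare_example.cc (hypothetical file, for illustration only; not part of ONNX Runtime)
// Example build: g++ -std=c++17 -Wall -Wextra -Werror sign_compare_example.cc -lgtest -lgtest_main
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

#include "gtest/gtest.h"

TEST(SignCompareExample, UnsignedLiteralsKeepComparisonsWarningFree) {
  std::vector<float> values{0.0f, 1.0f, 0.0f, 2.0f};

  // values.size() is std::size_t (unsigned). Comparing it with the signed
  // literal 4 can trigger -Wsign-compare inside the assertion's comparison,
  // which turns into a hard error under -Werror; the unsigned literal 4u
  // keeps both operands unsigned.
  ASSERT_EQ(values.size(), 4u);

  // std::count returns a signed difference_type. It is non-negative here, so
  // casting it to std::size_t makes the comparison unsigned on both sides.
  const auto num_zeros = std::count(values.begin(), values.end(), 0.0f);
  ASSERT_EQ(static_cast<std::size_t>(num_zeros), 2u);

  // The same idea applies to other unsigned types, e.g. a 64-bit hash value:
  // compare against 0u rather than the signed literal 0.
  const uint64_t model_hash = 0x9e3779b97f4a7c15ull;
  ASSERT_NE(model_hash, 0u);
}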