Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

5.4 Build warnings preventing build #3

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions onnxruntime/core/providers/rocm/rocm_pch.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@
#endif

#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hipsparse.h>
#include <rocblas/rocblas.h>
#include <hipsparse/hipsparse.h>
#include <hiprand/hiprand.h>
#include <miopen/miopen.h>
#include <hipfft.h>
#include <hipfft/hipfft.h>

#ifdef ORT_USE_NCCL
#include <rccl.h>
#include <rccl/rccl.h>
#endif

#if defined(_MSC_VER)
Expand Down
10 changes: 5 additions & 5 deletions onnxruntime/test/contrib_ops/bias_dropout_op_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -136,14 +136,14 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector<int64_t>& input_s
}

auto output_verifier = [&](const std::vector<OrtValue>& fetches, const std::string& provider_type) {
ASSERT_GE(fetches.size(), 1);
ASSERT_GE(fetches.size(), 1u);
const auto& output_tensor = FetchTensor(fetches[0]);
auto output_span = output_tensor.DataAsSpan<float>();

const auto num_dropped_values = std::count(output_span.begin(), output_span.end(), residual_value);

if (ratio == 1.0f) {
ASSERT_EQ(num_dropped_values, static_cast<size_t>(output_span.size())) << "provider: " << provider_type;
ASSERT_EQ(static_cast<unsigned int>(num_dropped_values), static_cast<size_t>(output_span.size())) << "provider: " << provider_type;
} else {
ASSERT_NEAR(static_cast<float>(num_dropped_values) / static_cast<size_t>(output_span.size()),
training_mode == TrainingTrue ? ratio : 0.0f, 0.1f)
Expand All @@ -159,7 +159,7 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector<int64_t>& input_s
}

if (use_mask) {
ASSERT_GE(fetches.size(), 2);
ASSERT_GE(fetches.size(), 2u);
const auto& mask_tensor = FetchTensor(fetches[1]);
auto mask_span = mask_tensor.DataAsSpan<bool>();
ASSERT_EQ(mask_span.size(), output_span.size()) << "provider: " << provider_type;
Expand All @@ -186,11 +186,11 @@ void RunBiasDropoutTest(const bool use_mask, const std::vector<int64_t>& input_s
t.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &t_eps);

std::vector<OrtValue> dropout_outputs = t.GetFetches();
ASSERT_GE(dropout_outputs.size(), 1);
ASSERT_GE(dropout_outputs.size(), 1u);
const float* output_values = FetchTensor(dropout_outputs[0]).Data<float>();
t_bitmask.AddOutput<float>("output", input_shape, output_values, input_size);
if (use_mask) {
ASSERT_GE(dropout_outputs.size(), 2);
ASSERT_GE(dropout_outputs.size(), 2u);
const bool* mask_values = FetchTensor(dropout_outputs[1]).Data<bool>();
std::vector<BitmaskElementType> bitmask_values = MasksToBitmasks(input_size, mask_values);
t_bitmask.AddOutput<BitmaskElementType>("mask", {static_cast<int64_t>(bitmask_values.size())}, bitmask_values);
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/contrib_ops/bitmask_dropout_op_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ void RunTestForTraining(const std::vector<int64_t>& input_dims) {
dropout.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &dropout_eps);

std::vector<OrtValue> dropout_outputs = dropout.GetFetches();
ASSERT_EQ(dropout_outputs.size(), 2);
ASSERT_EQ(dropout_outputs.size(), 2u);
const T* output_values = FetchTensor(dropout_outputs[0]).Data<T>();
const bool* mask_values = FetchTensor(dropout_outputs[1]).Data<bool>();
std::vector<BitmaskElementType> bitmask_values = MasksToBitmasks(input_size, mask_values);
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/test/framework/execution_provider_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ TEST(ExecutionProviderTest, MetadefIdGeneratorUsingModelPath) {
HashValue model_hash;
int id = ep.GetId(viewer, model_hash);
ASSERT_EQ(id, 0);
ASSERT_NE(model_hash, 0);
ASSERT_NE(model_hash, 0u);

for (int i = 1; i < 4; ++i) {
HashValue cur_model_hash;
Expand Down Expand Up @@ -70,7 +70,7 @@ TEST(ExecutionProviderTest, MetadefIdGeneratorUsingModelHashing) {
HashValue model_hash;
int id = ep.GetId(viewer, model_hash);
ASSERT_EQ(id, 0);
ASSERT_NE(model_hash, 0);
ASSERT_NE(model_hash, 0u);

// now load the model from bytes and check the hash differs
std::ifstream model_file_stream(model_path, std::ios::in | std::ios::binary);
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/framework/inference_session_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ void RunModelWithBindingMatMul(InferenceSession& session_object,
#if defined(USE_CUDA) || defined(USE_ROCM)
// in this case we need to copy the tensor from cuda to cpu
vector<OrtValue>& outputs = io_binding->GetOutputs();
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(1u, outputs.size());
auto& rtensor = outputs.front().Get<Tensor>();
auto element_type = rtensor.DataType();
auto& shape = rtensor.Shape();
Expand Down
20 changes: 10 additions & 10 deletions onnxruntime/test/framework/random_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,26 +32,26 @@ TEST(RandomTest, PhiloxGeneratorTest) {
PhiloxGenerator generator(17);

auto seeds = generator.NextPhiloxSeeds(1);
ASSERT_EQ(seeds.first, 17);
ASSERT_EQ(seeds.second, 0);
ASSERT_EQ(seeds.first, 17u);
ASSERT_EQ(seeds.second, 0u);

seeds = generator.NextPhiloxSeeds(10);
ASSERT_EQ(seeds.first, 17);
ASSERT_EQ(seeds.second, 1);
ASSERT_EQ(seeds.first, 17u);
ASSERT_EQ(seeds.second, 1u);

seeds = generator.NextPhiloxSeeds(0);
ASSERT_EQ(seeds.first, 17);
ASSERT_EQ(seeds.second, 11);
ASSERT_EQ(seeds.first, 17u);
ASSERT_EQ(seeds.second, 11u);

seeds = generator.NextPhiloxSeeds(1);
ASSERT_EQ(seeds.first, 17);
ASSERT_EQ(seeds.second, 11);
ASSERT_EQ(seeds.first, 17u);
ASSERT_EQ(seeds.second, 11u);

generator.SetSeed(17);

seeds = generator.NextPhiloxSeeds(1);
ASSERT_EQ(seeds.first, 17);
ASSERT_EQ(seeds.second, 0);
ASSERT_EQ(seeds.first, 17u);
ASSERT_EQ(seeds.second, 0u);
}

} // namespace test
Expand Down
3 changes: 1 addition & 2 deletions onnxruntime/test/framework/session_state_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -100,15 +100,14 @@ TEST_P(SessionStateAddGetKernelTest, AddGetKernelTest) {

INSTANTIATE_TEST_SUITE_P(SessionStateTests, SessionStateAddGetKernelTest, testing::Values(0, 1));

namespace {
class TestParam {
public:
int ir_version;
bool enable_mem_pattern;
int thread_count;
};
TestParam param_list[] = {{3, true, 0}, {4, true, 0}, {3, false, 0}, {4, false, 0}, {3, true, 1}, {4, true, 1}, {3, false, 1}, {4, false, 1}};
} // namespace

class SessionStateTestP : public testing::TestWithParam<TestParam> {};
// Test that we separate out constant and non-constant initializers correctly
TEST_P(SessionStateTestP, TestInitializerProcessing) {
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/test/providers/cpu/generator/random_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,7 @@ void RunRandomNormalGpuTest(const std::vector<int64_t> dims, const float mean, c

auto output_verifier = [&](const std::vector<OrtValue>& fetches, const std::string& provider_type) {
// Only one output, and the mean of the output values is near the attribute mean.
ASSERT_EQ(fetches.size(), 1);
ASSERT_EQ(fetches.size(), 1u);
const auto& output_tensor = FetchTensor(fetches[0]);
if (output_dtype == TensorProto_DataType::TensorProto_DataType_FLOAT) {
auto output_span = output_tensor.DataAsSpan<float>();
Expand Down Expand Up @@ -474,7 +474,7 @@ void RunRandomUniformGpuTest(const std::vector<int64_t> dims, const float low, c
auto output_verifier = [&](const std::vector<OrtValue>& fetches, const std::string& provider_type) {
// Only one output. Each value in the output tensor is between low and high.
// The mean of the output values is near the midpoint of low and high.
ASSERT_EQ(fetches.size(), 1);
ASSERT_EQ(fetches.size(), 1u);
const auto& output_tensor = FetchTensor(fetches[0]);
if (output_dtype == TensorProto_DataType::TensorProto_DataType_FLOAT) {
auto output_span = output_tensor.DataAsSpan<float>();
Expand Down