Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[tests] remove direct std::random_device usage. #2397

Merged
merged 16 commits into from
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 9 additions & 13 deletions test/gtest/bn_test_data.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -130,13 +130,9 @@ struct BNTestData

void InitTensorsWithRandValue()
{
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 100};
auto gen_value = [&](auto...) {
return 1e-2 * static_cast<XDataType>(d(gen)) * ((d(gen) % 2 == 1) ? -1 : 1);
};
input.generate(gen_value);
input.generate([](auto...) {
return prng::gen_descreet_uniform_sign(static_cast<XDataType>(1e-2), 100);
});
}

void SetDirection() { direction = bn_config.Direction; }
Expand Down Expand Up @@ -204,17 +200,17 @@ struct BNInferTestData : public BNTestData<XDataType, YDataType, TConfig>

void InitTensorsWithRandValue()
{
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 100};
auto gen_value = [&](auto...) {
return 1e-2 * static_cast<ScaleDataType>(d(gen)) * ((d(gen) % 2 == 1) ? -1 : 1);
auto gen_value = [](auto...) {
return prng::gen_descreet_uniform_sign(static_cast<ScaleDataType>(1e-2), 100);
};
scale.generate(gen_value);
shift.generate(gen_value);
estMean.generate(gen_value);

auto gen_var = [&](auto...) { return 1e-2 * (static_cast<MeanVarDataType>(d(gen)) + 1); };
auto gen_var = [](auto...) {
return static_cast<MeanVarDataType>(1e-2) *
static_cast<MeanVarDataType>(prng::gen_0_to_B(100) + 1);
};
estVariance.generate(gen_var);
}
void WriteToGPU()
Expand Down
7 changes: 3 additions & 4 deletions test/gtest/group_conv3d_bwd.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,9 @@ struct ConvBwdSolverTest
weights = tensor<T>{miopen_type<T>{}, tensor_layout, conv_config.GetWeights()};
SetTensorLayout(input.desc);
SetTensorLayout(weights.desc);
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_real_distribution<> d{-3, 3};
auto gen_value = [&](auto...) { return d(gen); };
auto gen_value = [](auto...) {
return prng::gen_A_to_B(static_cast<T>(-3.0), static_cast<T>(3.0));
};
std::fill(input.begin(), input.end(), std::numeric_limits<double>::quiet_NaN());
weights.generate(gen_value);
conv_desc = conv_config.GetConv();
Expand Down
7 changes: 3 additions & 4 deletions test/gtest/group_conv3d_fwd.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,9 @@ struct ConvFwdSolverTest
weights = tensor<T>{miopen_type<T>{}, tensor_layout, conv_config.GetWeights()};
SetTensorLayout(input.desc);
SetTensorLayout(weights.desc);
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_real_distribution<> d{-3, 3};
auto gen_value = [&](auto...) { return d(gen); };
auto gen_value = [](auto...) {
return prng::gen_A_to_B(static_cast<T>(-3.0), static_cast<T>(3.0));
};
input.generate(gen_value);
weights.generate(gen_value);
conv_desc = conv_config.GetConv();
Expand Down
8 changes: 4 additions & 4 deletions test/gtest/group_conv3d_wrw.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,10 @@ struct ConvWrwSolverTest
std::tie(algo, conv_config, tensor_layout) = GetParam();
input = tensor<T>{miopen_type<T>{}, tensor_layout, conv_config.GetInput()};
weights = tensor<T>{miopen_type<T>{}, tensor_layout, conv_config.GetWeights()};
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_real_distribution<> d{-3, 3};
auto gen_value = [&](auto...) { return d(gen); };

auto gen_value = [](auto...) {
return prng::gen_A_to_B(static_cast<T>(-3.0), static_cast<T>(3.0));
};
input.generate(gen_value);

std::fill(weights.begin(), weights.end(), 0);
Expand Down
8 changes: 3 additions & 5 deletions test/gtest/group_solver.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -138,11 +138,9 @@ struct ConvFwdSolverTest
weights = tensor<T>{miopen_type<T>{}, tensor_layout, conv_config.GetWeights()};
SetTensorLayout(input.desc);
SetTensorLayout(weights.desc);

std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_real_distribution<> d{-3, 3};
auto gen_value = [&](auto...) { return d(gen); };
auto gen_value = [](auto...) {
return prng::gen_A_to_B(static_cast<T>(-3.0), static_cast<T>(3.0));
};
input.generate(gen_value);
weights.generate(gen_value);

Expand Down
16 changes: 8 additions & 8 deletions test/gtest/log.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@

#include <miopen/config.h>
#include <miopen/fusion_plan.hpp>
#include <stdlib.h>
#include <random>
#include <cstdlib>
#include "random.hpp"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

minor: perhaps we should name (and move) our "random.hpp" to "miopen/random.hpp" or if that's not feasible then "miopen_random.hpp" to avoid confusion with <random> from STL.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@amberhassaan

perhaps we should name (and move) our "random.hpp" to "miopen/random.hpp"

The library does not use random.hpp (while tests and driver use different versions of this file). Therefore, this is not possible and not needed.

or if that's not feasible then "miopen_random.hpp" to avoid confusion with &lt;random&gt; from STL.

But I do not see any confusion. For example, double quotes imply that the included file is not from the standard library.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right "random.hpp" should convey that it's local. However, we are not consistent in our repository and we often use <> for local MIOpen includes.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The quoted form should only be used for headers that are relative to the including file, and everything else should use the angle brackets. In general, MIOpen follows this rule. This is also the guideline from C++ Core Guidelines SF.12.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Didn't know that we were following that core guidelines. Some projects exclusively use <> for system includes and "" for project includes.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@CAHEK7 Please resolve #2397 (comment)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@CAHEK7 "../random.hpp" here is expected to comply with the rule.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@atamazov @pfultz2 done.
But what about the other includes? Mixing different paradigms of including headers may be confusing.

#include "log.hpp"
#include "tensor_util.hpp"
#include "get_handle.hpp"
...
#include "../random.hpp"

Should everything be fixed uniformly across the project? It is a very common case that we include project files using quotes, implying relative includes.

Almost all the files in gtests directory and some of the files in the test directory should be fixed.


#if MIOPEN_BACKEND_OPENCL
#define BKEND "OpenCL"
Expand Down Expand Up @@ -226,17 +226,17 @@ struct CreateBNormFusionPlan
shift = tensor<T>{input_lens};
estMean = tensor<T>{input_lens};
estVariance = tensor<T>{input_lens};
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 100};
auto gen_value = [&](auto...) {
return 1e-2 * static_cast<T>(d(gen)) * ((d(gen) % 2 == 1) ? -1 : 1);

auto gen_value = [](auto...) {
return prng::gen_descreet_uniform_sign(static_cast<T>(1e-2), 100);
};
input.generate(gen_value);
scale.generate(gen_value);
shift.generate(gen_value);
estMean.generate(gen_value);
auto gen_var = [&](auto...) { return 1e-2 * (static_cast<T>(d(gen)) + 1); };
auto gen_var = [](auto...) {
return static_cast<T>(1e-2) * static_cast<T>(prng::gen_0_to_B(100) + 1);
};
estVariance.generate(gen_var);
activ_desc = {activ_mode, activ_alpha, activ_beta, activ_gamma};
output = tensor<T>{input_lens};
Expand Down
16 changes: 7 additions & 9 deletions test/gtest/na.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@
*******************************************************************************/
#pragma once

#include <random>

#include <gtest/gtest.h>
#include <miopen/miopen.h>
#include <miopen/solver_id.hpp>
Expand All @@ -35,7 +33,7 @@

#include "tensor_util.hpp"
#include "get_handle.hpp"

#include "random.hpp"
struct BNTestCase
{
size_t N;
Expand Down Expand Up @@ -107,17 +105,17 @@ struct BNActivInferTest
shift = tensor<T>{derivedBnDesc.GetLengths()};
estMean = tensor<T>{derivedBnDesc.GetLengths()};
estVariance = tensor<T>{derivedBnDesc.GetLengths()};
std::random_device rd{};
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 100};
auto gen_value = [&](auto...) {
return 1e-2 * static_cast<T>(d(gen)) * ((d(gen) % 2 == 1) ? -1 : 1);

auto gen_value = [](auto...) {
return prng::gen_descreet_uniform_sign(static_cast<T>(1e-2), 100);
};
input.generate(gen_value);
scale.generate(gen_value);
shift.generate(gen_value);
estMean.generate(gen_value);
auto gen_var = [&](auto...) { return 1e-2 * (static_cast<T>(d(gen)) + 1); };
auto gen_var = [](auto...) {
return static_cast<T>(1e-2) * static_cast<T>(prng::gen_0_to_B(100) + 1);
};
estVariance.generate(gen_var);
activ_desc = {activ_mode, activ_alpha, activ_beta, activ_gamma};
output = tensor<T>{bn_config.GetInput()};
Expand Down