Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

New benchmark compares concurrent throughput of device_vector and device_uvector #981

Merged
Merged
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
118 changes: 115 additions & 3 deletions benchmarks/device_uvector/device_uvector_bench.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -14,8 +14,14 @@
* limitations under the License.
*/

#include "../synchronization/synchronization.hpp"
#include "thrust/detail/raw_pointer_cast.h"

#include <rmm/cuda_stream.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
Expand All @@ -24,7 +30,10 @@

#include <cuda_runtime_api.h>

static void BM_UvectorSizeConstruction(benchmark::State& state)
#include <cstdio>
#include <type_traits>

void BM_UvectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
Expand All @@ -45,7 +54,7 @@ BENCHMARK(BM_UvectorSizeConstruction)
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);

static void BM_ThrustVectorSizeConstruction(benchmark::State& state)
void BM_ThrustVectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
Expand All @@ -66,4 +75,107 @@ BENCHMARK(BM_ThrustVectorSizeConstruction)
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);

// simple kernel used to test concurrent execution.
__global__ void kernel(int const* input, int* output, std::size_t num)
{
for (auto i = blockDim.x * blockIdx.x + threadIdx.x; i < num; i += gridDim.x * blockDim.x) {
output[i] = input[i] * input[i];
}
}

// The three vector types under comparison: thrust's device_vector (default
// CUDA allocation), rmm::device_vector (constructed below with an
// rmm::mr::thrust_allocator), and rmm::device_uvector (stream-ordered,
// uninitialized on construction).
using thrust_vector = thrust::device_vector<int32_t>;
using rmm_vector = rmm::device_vector<int32_t>;
using rmm_uvector = rmm::device_uvector<int32_t>;

// Constructs a vector of `num_elements` of the requested type on `stream`.
//
// thrust_vector and rmm_vector value-initialize their elements on
// construction; device_uvector is deliberately uninitialized, so its contents
// are only zeroed when `zero_init` is requested (keeping the comparison fair
// when the data is subsequently read).
template <typename Vector>
Vector make_vector(int num_elements, rmm::cuda_stream_view stream, bool zero_init = false)
{
  static_assert(std::is_same_v<Vector, thrust_vector> or std::is_same_v<Vector, rmm_vector> or
                  std::is_same_v<Vector, rmm_uvector>,
                "unsupported vector type");
  if constexpr (std::is_same_v<Vector, thrust_vector>) {
    return Vector(num_elements, 0);
  } else if constexpr (std::is_same_v<Vector, rmm_vector>) {
    return Vector(num_elements, 0, rmm::mr::thrust_allocator<int32_t>(stream));
  } else if constexpr (std::is_same_v<Vector, rmm_uvector>) {
    auto vec = Vector(num_elements, stream);
    if (zero_init) {
      // Don't ignore the cudaMemsetAsync result: a failure here would
      // otherwise surface as a mysterious error at a later sync point.
      RMM_CUDA_TRY(
        cudaMemsetAsync(vec.data(), 0, num_elements * sizeof(int32_t), stream.value()));
    }
    return vec;
  }
}

// Uniformly obtain a raw device pointer to a vector's storage: thrust's
// data() returns a device_ptr wrapper, while device_uvector's returns a raw
// pointer; raw_pointer_cast handles both.
template <typename Vector>
int32_t* vector_data(Vector& vec)
{
  auto ptr = vec.data();
  return thrust::raw_pointer_cast(ptr);
}

// One benchmark iteration: build a zero-initialized input vector on
// `input_stream`, then on each of `streams` allocate an output vector and
// launch the square kernel reading the shared input. Allocating and launching
// on independent streams is what exercises concurrent throughput; all streams
// are synchronized at the end so the caller times the full workflow.
template <typename Vector>
void vector_workflow(std::size_t num_elements,
                     int num_blocks,
                     int block_size,
                     rmm::cuda_stream const& input_stream,
                     std::vector<rmm::cuda_stream> const& streams)
{
  auto input = make_vector<Vector>(num_elements, input_stream, true);
  // Make the zero-initialized input visible to work on the other streams.
  input_stream.synchronize();
  for (rmm::cuda_stream_view stream : streams) {
    auto output = make_vector<Vector>(num_elements, stream);
    kernel<<<num_blocks, block_size, 0, stream.value()>>>(
      vector_data(input), vector_data(output), num_elements);
    // Kernel launches don't return errors directly; surface configuration
    // errors here instead of at an arbitrary later sync.
    RMM_CUDA_TRY(cudaGetLastError());
    // NOTE(review): `output` is destroyed here while the kernel may still be
    // queued — relies on deallocation being stream-ordered (uvector/rmm
    // allocator) or synchronizing (thrust's cudaFree); confirm.
  }

  for (rmm::cuda_stream_view stream : streams) {
    stream.synchronize();
  }
}

// Benchmarks the full allocate/launch/synchronize workflow for a vector type,
// running four worker streams concurrently against a
// cuda_async_memory_resource set as the current device resource.
template <typename Vector>
void BM_VectorWorkflow(benchmark::State& state)
{
  rmm::mr::cuda_async_memory_resource cuda_async_mr{};
  rmm::mr::set_current_device_resource(&cuda_async_mr);

  rmm::cuda_stream input_stream;
  std::vector<rmm::cuda_stream> streams(4);

  auto const num_elements  = state.range(0);
  int constexpr block_size = 256;
  int constexpr num_blocks = 16;

  for (auto _ : state) {  // NOLINT(clang-analyzer-deadcode.DeadStores)
    cuda_event_timer timer(state, true, input_stream);  // flush_l2_cache = true
    vector_workflow<Vector>(num_elements, num_blocks, block_size, input_stream, streams);
  }

  // 9 accesses per element: 1 zero-initializing write of the input, plus
  // 4 streams x (1 read of input + 1 write of output) in the kernel.
  constexpr auto num_accesses = 9;
  auto const bytes            = num_elements * sizeof(int32_t) * num_accesses;
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations() * bytes));

  // Restore the default resource so subsequent benchmarks are unaffected.
  rmm::mr::set_current_device_resource(nullptr);
}

// Registrations for each vector type over 100K..100M elements.
// NOTE(review): UseManualTime() presumably pairs with the cuda_event_timer in
// BM_VectorWorkflow so reported times come from CUDA events rather than host
// wall clock — confirm against the synchronization helper.
BENCHMARK_TEMPLATE(BM_VectorWorkflow, thrust_vector)  // NOLINT
  ->RangeMultiplier(10)                               // NOLINT
  ->Range(100'000, 100'000'000)                       // NOLINT
  ->Unit(benchmark::kMicrosecond)
  ->UseManualTime();

// The only difference here is that `rmm::device_vector` uses `rmm::current_device_resource()`
// for allocation while `thrust::device_vector` uses cudaMalloc/cudaFree. In the benchmarks we use
// `cuda_async_memory_resource`, which is faster.
BENCHMARK_TEMPLATE(BM_VectorWorkflow, rmm_vector)  // NOLINT
  ->RangeMultiplier(10)                            // NOLINT
  ->Range(100'000, 100'000'000)                    // NOLINT
  ->Unit(benchmark::kMicrosecond)
  ->UseManualTime();

BENCHMARK_TEMPLATE(BM_VectorWorkflow, rmm_uvector)  // NOLINT
  ->RangeMultiplier(10)                             // NOLINT
  ->Range(100'000, 100'000'000)                     // NOLINT
  ->Unit(benchmark::kMicrosecond)
  ->UseManualTime();

BENCHMARK_MAIN();