Add NVTX ranges to all CUB algorithms
Fixes: #719

Co-authored-by: Michael Schellenberger Costa <[email protected]>
bernhardmgruber and miscco committed Apr 22, 2024
1 parent ac49021 commit f9de904
Showing 17 changed files with 318 additions and 0 deletions.
114 changes: 114 additions & 0 deletions cub/cub/detail/nvtx.cuh
@@ -0,0 +1,114 @@
/******************************************************************************
* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/

#pragma once

#include <cub/config.cuh>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
# pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
# pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
# pragma system_header
#endif // no system header

#if __has_include(<source_location>)
# include <source_location>
#endif // __has_include(<source_location>)

// NVTX documentation: https://nvidia.github.io/NVTX/
#include <cuda/std/optional>

#include <nvtx3/nvToolsExt.h>

CUB_NAMESPACE_BEGIN

namespace detail
{
struct NVTXGlobal
{
  nvtxDomainHandle_t domain;

  NVTXGlobal()
  {
    domain = nvtxDomainCreate("CUB");
  }

  ~NVTXGlobal()
  {
    nvtxDomainDestroy(domain);
  }
};

// Triggers global setup of the NVTX domain for CUB.
// TODO(bgruber): replace by an inline variable in C++17
auto& nvtxGlobal()
{
  static NVTXGlobal g;
  return g;
}

// A scope guard to create a named NVTX range inside the CUB domain.
struct NVTXGuard
{
  // FIXME(bgruber): _CCCL_DEVICE is only needed to make cuda::std::optional happy.

  CUB_RUNTIME_FUNCTION _CCCL_DEVICE NVTXGuard(const char* name)
  {
    NV_IF_TARGET(
      NV_IS_HOST,
      // TODO(bgruber): documentation mentions a nvtxDomainRangePushA, but it does not exist
      nvtxEventAttributes_t eventAttrib{};
      eventAttrib.version       = NVTX_VERSION;
      eventAttrib.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
      eventAttrib.messageType   = NVTX_MESSAGE_TYPE_ASCII;
      eventAttrib.message.ascii = name;
      nvtxDomainRangePushEx(nvtxGlobal().domain, &eventAttrib););
  }

#ifdef __cpp_lib_source_location
  _CCCL_DEVICE NVTXGuard(const std::source_location& loc = std::source_location::current())
      : NVTXGuard(loc.function_name())
  {}
#endif // __cpp_lib_source_location

  CUB_RUNTIME_FUNCTION _CCCL_DEVICE ~NVTXGuard()
  {
    NV_IF_TARGET(NV_IS_HOST, nvtxDomainRangePop(nvtxGlobal().domain););
  }
};
} // namespace detail

// Inserts an NVTX range starting here and lasting until the end of the current function scope.
// TODO(bgruber): replace this by NVTX3_FUNC_RANGE from <nvtx3/nvtx3.hpp> when available (it was not yet shipped in CTK 12.4).
#define CUB_NVTX_RANGE_SCOPE [[maybe_unused]] ::cub::detail::NVTXGuard youShallNotGuessThisVariableName(__func__)
#define CUB_NVTX_RANGE_SCOPE_IF(condition)                                           \
  ::cuda::std::optional<::cub::detail::NVTXGuard> youShallNotGuessThisVariableName;  \
  if (condition)                                                                     \
    youShallNotGuessThisVariableName.emplace(__func__);

CUB_NAMESPACE_END
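
For reference, the scope-guard pattern this header builds on can be sketched with the plain NVTX C API on the default domain (the NVTXGuard above additionally routes everything through the dedicated "CUB" domain and is usable in host/device code). The struct and function names below are illustrative only and not part of the commit:

#include <nvtx3/nvToolsExt.h>

// RAII guard: opens a named NVTX range on construction, closes it on destruction.
struct ScopedRange
{
  explicit ScopedRange(const char* name)
  {
    nvtxRangePushA(name); // push a range onto the calling thread's range stack
  }
  ~ScopedRange()
  {
    nvtxRangePop(); // pop it when the enclosing scope ends
  }
};

void expensive_step()
{
  ScopedRange r(__func__); // range named "expensive_step" spans this function
  // ... work to be profiled ...
}

int main()
{
  expensive_step(); // shows up as a host-side range in, e.g., Nsight Systems
  return 0;
}

CUB_NVTX_RANGE_SCOPE applies the same idea with __func__ and the "CUB" domain, so each public algorithm entry point below becomes a named range on the host timeline.
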
9 changes: 9 additions & 0 deletions cub/cub/device/device_adjacent_difference.cuh
@@ -38,6 +38,7 @@
#endif // no system header

#include <cub/detail/choose_offset.cuh>
#include <cub/detail/nvtx.cuh>
#include <cub/detail/type_traits.cuh>
#include <cub/device/dispatch/dispatch_adjacent_difference.cuh>
#include <cub/util_deprecated.cuh>
@@ -256,6 +257,8 @@ public:
DifferenceOpT difference_op = {},
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

constexpr bool may_alias = false;
constexpr bool read_left = true;

@@ -381,6 +384,8 @@ public:
DifferenceOpT difference_op = {},
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

constexpr bool may_alias = true;
constexpr bool read_left = true;

@@ -524,6 +529,8 @@ public:
DifferenceOpT difference_op = {},
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

constexpr bool may_alias = false;
constexpr bool read_left = false;

@@ -638,6 +645,8 @@ public:
DifferenceOpT difference_op = {},
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

constexpr bool may_alias = true;
constexpr bool read_left = false;

3 changes: 3 additions & 0 deletions cub/cub/device/device_copy.cuh
@@ -39,6 +39,7 @@
# pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_batch_memcpy.cuh>

#include <thrust/system/cuda/detail/core/triple_chevron_launch.h>
@@ -170,6 +171,8 @@ struct DeviceCopy
uint32_t num_ranges,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

// Integer type large enough to hold any offset in [0, num_ranges)
using RangeOffsetT = uint32_t;

7 changes: 7 additions & 0 deletions cub/cub/device/device_for.cuh
@@ -37,6 +37,7 @@
# pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_for.cuh>
#include <cub/util_namespace.cuh>

@@ -574,6 +575,7 @@ public:
template <class ShapeT, class OpT>
CUB_RUNTIME_FUNCTION static cudaError_t Bulk(ShapeT shape, OpT op, cudaStream_t stream = {})
{
CUB_NVTX_RANGE_SCOPE;
static_assert(::cuda::std::is_integral<ShapeT>::value, "ShapeT must be an integral type");
using offset_t = ShapeT;
return detail::for_each::dispatch_t<offset_t, OpT>::dispatch(static_cast<offset_t>(shape), op, stream);
@@ -630,6 +632,7 @@ public:
CUB_RUNTIME_FUNCTION static cudaError_t
ForEachN(RandomAccessIteratorT first, NumItemsT num_items, OpT op, cudaStream_t stream = {})
{
CUB_NVTX_RANGE_SCOPE;
using offset_t = NumItemsT;
using use_vectorization_t = ::cuda::std::integral_constant<bool, false>;

@@ -689,6 +692,8 @@ public:
CUB_RUNTIME_FUNCTION static cudaError_t
ForEach(RandomAccessIteratorT first, RandomAccessIteratorT last, OpT op, cudaStream_t stream = {})
{
CUB_NVTX_RANGE_SCOPE;

using offset_t = typename THRUST_NS_QUALIFIER::iterator_traits<RandomAccessIteratorT>::difference_type;

const auto num_items = static_cast<offset_t>(THRUST_NS_QUALIFIER::distance(first, last));
@@ -750,6 +755,7 @@ public:
CUB_RUNTIME_FUNCTION static cudaError_t
ForEachCopyN(RandomAccessIteratorT first, NumItemsT num_items, OpT op, cudaStream_t stream = {})
{
CUB_NVTX_RANGE_SCOPE;
static_assert(THRUST_NS_QUALIFIER::is_contiguous_iterator<RandomAccessIteratorT>::value,
"Iterator must be contiguous");

@@ -810,6 +816,7 @@ public:
CUB_RUNTIME_FUNCTION static cudaError_t
ForEachCopy(RandomAccessIteratorT first, RandomAccessIteratorT last, OpT op, cudaStream_t stream = {})
{
CUB_NVTX_RANGE_SCOPE;
static_assert(THRUST_NS_QUALIFIER::is_contiguous_iterator<RandomAccessIteratorT>::value,
"Iterator must be contiguous");

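
The DeviceFor entry points above take no temporary storage, so they use the unconditional CUB_NVTX_RANGE_SCOPE. A usage sketch (illustrative only, not part of the commit) in which the single call below should appear as one "ForEachN" range in the CUB domain:

#include <cub/device/device_for.cuh>

#include <thrust/device_vector.h>

#include <cstdio>

struct Doubler
{
  __device__ void operator()(int& x) const
  {
    x *= 2; // applied to every element
  }
};

int main()
{
  thrust::device_vector<int> data(1024, 1);

  // One NVTX range named "ForEachN" should cover this call on the host timeline.
  const cudaError_t err = cub::DeviceFor::ForEachN(
    thrust::raw_pointer_cast(data.data()), static_cast<int>(data.size()), Doubler{});
  cudaDeviceSynchronize();
  std::printf("status: %d, first element: %d\n", static_cast<int>(err), static_cast<int>(data[0]));
  return 0;
}
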
5 changes: 5 additions & 0 deletions cub/cub/device/device_histogram.cuh
@@ -42,6 +42,7 @@
# pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_histogram.cuh>
#include <cub/util_deprecated.cuh>

@@ -796,6 +797,8 @@ struct DeviceHistogram
size_t row_stride_bytes,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

/// The sample value type of the input iterator
using SampleT = cub::detail::value_t<SampleIteratorT>;
Int2Type<sizeof(SampleT) == 1> is_byte_sample;
@@ -1533,6 +1536,8 @@ struct DeviceHistogram
size_t row_stride_bytes,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);

/// The sample value type of the input iterator
using SampleT = cub::detail::value_t<SampleIteratorT>;
Int2Type<sizeof(SampleT) == 1> is_byte_sample;
2 changes: 2 additions & 0 deletions cub/cub/device/device_memcpy.cuh
@@ -39,6 +39,7 @@
# pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_batch_memcpy.cuh>

#include <cstdint>
@@ -172,6 +173,7 @@ struct DeviceMemcpy
uint32_t num_buffers,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
static_assert(std::is_pointer<cub::detail::value_t<InputBufferIt>>::value,
"DeviceMemcpy::Batched only supports copying of memory buffers."
"Please consider using DeviceCopy::Batched instead.");
9 changes: 9 additions & 0 deletions cub/cub/device/device_merge_sort.cuh
@@ -38,6 +38,7 @@
#endif // no system header

#include <cub/detail/choose_offset.cuh>
#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_merge_sort.cuh>
#include <cub/util_deprecated.cuh>
#include <cub/util_namespace.cuh>
@@ -213,6 +214,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

using DispatchMergeSortT =
@@ -367,6 +369,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

using DispatchMergeSortT =
@@ -508,6 +511,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

using DispatchMergeSortT =
@@ -647,6 +651,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

using DispatchMergeSortT =
@@ -783,6 +788,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

return SortPairs<KeyIteratorT, ValueIteratorT, PromotedOffsetT, CompareOpT>(
@@ -899,6 +905,7 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

return SortKeys<KeyIteratorT, PromotedOffsetT, CompareOpT>(
@@ -1028,6 +1035,8 @@ struct DeviceMergeSort
CompareOpT compare_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage); // FIXME(bgruber): We want to have the name "StableSortKeysCopy", but
// SortKeysCopy will emit "SortKeysCopy"
using PromotedOffsetT = detail::promote_small_offset_t<OffsetT>;

return SortKeysCopy<KeyInputIteratorT, KeyIteratorT, PromotedOffsetT, CompareOpT>(
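
Throughout DeviceMergeSort (and the other algorithms in this commit), CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage) gates on the temporary-storage pointer so the usual two-phase call pattern only produces a range for the call that does real work, not for the size query. A usage sketch (illustrative only, not part of the commit):

#include <cub/device/device_merge_sort.cuh>

#include <thrust/device_vector.h>

#include <cstddef>

struct Less
{
  __device__ bool operator()(int a, int b) const
  {
    return a < b;
  }
};

int main()
{
  thrust::device_vector<int> keys(1 << 20, 0);
  int* d_keys = thrust::raw_pointer_cast(keys.data());

  void* d_temp_storage           = nullptr;
  std::size_t temp_storage_bytes = 0;

  // Phase 1: size query. d_temp_storage is null, so no NVTX range is opened.
  cub::DeviceMergeSort::SortKeys(
    d_temp_storage, temp_storage_bytes, d_keys, static_cast<int>(keys.size()), Less{});

  thrust::device_vector<unsigned char> temp(temp_storage_bytes);
  d_temp_storage = thrust::raw_pointer_cast(temp.data());

  // Phase 2: the actual sort. This call should show up as a "SortKeys" range in the "CUB" NVTX domain.
  cub::DeviceMergeSort::SortKeys(
    d_temp_storage, temp_storage_bytes, d_keys, static_cast<int>(keys.size()), Less{});
  cudaDeviceSynchronize();
  return 0;
}
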
4 changes: 4 additions & 0 deletions cub/cub/device/device_partition.cuh
@@ -41,6 +41,7 @@
# pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_select_if.cuh>
#include <cub/device/dispatch/dispatch_three_way_partition.cuh>
#include <cub/util_deprecated.cuh>
@@ -178,6 +179,7 @@ struct DevicePartition
int num_items,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using OffsetT = int; // Signed integer type for global offsets
using SelectOp = NullType; // Selection op (not used)
using EqualityOp = NullType; // Equality operator (not used)
@@ -337,6 +339,7 @@ struct DevicePartition
SelectOp select_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using OffsetT = int; // Signed integer type for global offsets
using FlagIterator = NullType*; // FlagT iterator type (not used)
using EqualityOp = NullType; // Equality operator (not used)
@@ -581,6 +584,7 @@ struct DevicePartition
SelectSecondPartOp select_second_part_op,
cudaStream_t stream = 0)
{
CUB_NVTX_RANGE_SCOPE_IF(d_temp_storage);
using OffsetT = int;
using DispatchThreeWayPartitionIfT = DispatchThreeWayPartitionIf<
InputIteratorT,