Skip to content

Commit

Permalink
Merge branch 'cleanup' of https://github.com/galipremsagar/cudf into cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
galipremsagar committed Mar 5, 2024
2 parents 37230ca + ed2f342 commit 7e3ffb7
Show file tree
Hide file tree
Showing 10 changed files with 24 additions and 130 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,8 @@ jobs:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.11" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.2.2")))
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
build_type: ${{ inputs.build_type || 'branch' }}
branch: ${{ inputs.branch }}
sha: ${{ inputs.sha }}
Expand Down
9 changes: 6 additions & 3 deletions .github/workflows/pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -128,15 +128,17 @@ jobs:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.11" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.2.2")))
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
build_type: pull-request
script: "ci/build_wheel_dask_cudf.sh"
wheel-tests-dask-cudf:
needs: wheel-build-dask-cudf
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.11" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.2.2")))
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
build_type: pull-request
script: ci/test_wheel_dask_cudf.sh
devcontainer:
Expand All @@ -154,7 +156,8 @@ jobs:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.11" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.2.2")))
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
build_type: pull-request
script: ci/cudf_pandas_scripts/run_tests.sh
# pandas-tests:
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,8 @@ jobs:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.11" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.2.2")))
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
build_type: nightly
branch: ${{ inputs.branch }}
date: ${{ inputs.date }}
Expand Down
3 changes: 0 additions & 3 deletions cpp/include/cudf/detail/utilities/device_atomics.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -170,8 +170,6 @@ struct genericAtomicOperationImpl<float, DeviceSum, 4> {
}
};

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
// `atomicAdd(double)` is supported after cuda architecture 6.0
template <>
struct genericAtomicOperationImpl<double, DeviceSum, 8> {
using T = double;
Expand All @@ -180,7 +178,6 @@ struct genericAtomicOperationImpl<double, DeviceSum, 8> {
return atomicAdd(addr, update_value);
}
};
#endif

template <>
struct genericAtomicOperationImpl<int32_t, DeviceSum, 4> {
Expand Down
9 changes: 2 additions & 7 deletions cpp/src/filling/repeat.cu
Original file line number Diff line number Diff line change
Expand Up @@ -55,13 +55,8 @@ struct count_accessor {
std::enable_if_t<std::is_integral_v<T>, cudf::size_type> operator()(rmm::cuda_stream_view stream)
{
using ScalarType = cudf::scalar_type_t<T>;
#if 1
// TODO: temporary till cudf::scalar's value() function is marked as const
auto p_count = const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar));
#else
auto p_count = static_cast<ScalarType const*>(this->p_scalar);
#endif
auto count = p_count->value(stream);
auto p_count = static_cast<ScalarType const*>(this->p_scalar);
auto count = p_count->value(stream);
// static_cast is necessary due to bool
CUDF_EXPECTS(static_cast<int64_t>(count) <= std::numeric_limits<cudf::size_type>::max(),
"count should not exceed the column size limit",
Expand Down
4 changes: 0 additions & 4 deletions cpp/src/hash/managed.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,5 @@ struct managed {

// Returns true when the given pointer attributes describe CUDA managed
// (unified) memory, papering over the CUDA 10.0 API change in how the
// runtime reports the memory kind.
inline bool isPtrManaged(cudaPointerAttributes attr)
{
#if CUDART_VERSION >= 10000
// CUDA 10.0+ reports the memory kind via the `type` enum field.
return (attr.type == cudaMemoryTypeManaged);
#else
// Older runtimes exposed a dedicated `isManaged` flag instead.
return attr.isManaged;
#endif
}
10 changes: 0 additions & 10 deletions cpp/src/io/comp/snap.cu
Original file line number Diff line number Diff line change
Expand Up @@ -153,17 +153,7 @@ static __device__ uint8_t* StoreCopy(uint8_t* dst,
*/
// Returns a warp-wide mask of lanes that supplied the same value `v` as the
// calling lane. On Volta+ this is a single hardware instruction; the fallback
// emulates it by comparing only the low `hash_bits` bits of `v` (hash_bits is
// defined elsewhere in this file). NOTE(review): parameter `t` is unused in
// this body — presumably the active-thread mask; confirm against callers.
static inline __device__ uint32_t HashMatchAny(uint32_t v, uint32_t t)
{
#if (__CUDA_ARCH__ >= 700)
return __match_any_sync(~0, v);
#else
// Accumulates, per lane, a bit for every other lane that disagreed on at
// least one of the compared bits of `v`.
uint32_t err_map = 0;
for (uint32_t i = 0; i < hash_bits; i++, v >>= 1) {
uint32_t b = v & 1;
// ballot(b): mask of lanes whose current bit is 1. XOR with -(int32_t)b
// (all-ones if our bit is 1, zero otherwise) flags lanes differing from us.
uint32_t match_b = ballot(b);
err_map |= match_b ^ -(int32_t)b;
}
// Lanes with no differing bit across all hash_bits iterations are matches.
return ~err_map;
#endif
}

/**
Expand Down
2 changes: 1 addition & 1 deletion cpp/src/io/fst/agent_dfa.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ class DFASimulationCallbackWrapper {
{
uint32_t const count = transducer_table(old_state, symbol_id, read_symbol);
if (write) {
#if __CUDA_ARCH__ > 0
#if defined(__CUDA_ARCH__)
#pragma unroll 1
#endif
for (uint32_t out_char = 0; out_char < count; out_char++) {
Expand Down
11 changes: 6 additions & 5 deletions cpp/src/io/json/read_json.cu
Original file line number Diff line number Diff line change
Expand Up @@ -140,10 +140,11 @@ size_type find_first_delimiter_in_chunk(host_span<std::unique_ptr<cudf::io::data
return find_first_delimiter(buffer, delimiter, stream);
}

bool should_load_whole_source(json_reader_options const& reader_opts)
bool should_load_whole_source(json_reader_options const& opts, size_t source_size)
{
return reader_opts.get_byte_range_offset() == 0 and //
reader_opts.get_byte_range_size() == 0;
auto const range_offset = opts.get_byte_range_offset();
auto const range_size = opts.get_byte_range_size();
return range_offset == 0 and (range_size == 0 or range_size >= source_size);
}

/**
Expand All @@ -168,7 +169,7 @@ auto get_record_range_raw_input(host_span<std::unique_ptr<datasource>> sources,
reader_opts.get_byte_range_offset(),
reader_opts.get_byte_range_size(),
stream);
if (should_load_whole_source(reader_opts)) return buffer;
if (should_load_whole_source(reader_opts, sources[0]->size())) return buffer;
auto first_delim_pos =
reader_opts.get_byte_range_offset() == 0 ? 0 : find_first_delimiter(buffer, '\n', stream);
if (first_delim_pos == -1) {
Expand Down Expand Up @@ -212,7 +213,7 @@ table_with_metadata read_json(host_span<std::unique_ptr<datasource>> sources,
return legacy::read_json(sources, reader_opts, stream, mr);
}

if (not should_load_whole_source(reader_opts)) {
if (reader_opts.get_byte_range_offset() != 0 or reader_opts.get_byte_range_size() != 0) {
CUDF_EXPECTS(reader_opts.is_enabled_lines(),
"Specifying a byte range is supported only for JSON Lines");
CUDF_EXPECTS(sources.size() == 1,
Expand Down
Loading

0 comments on commit 7e3ffb7

Please sign in to comment.