upgrade dlpack to 0.5 (#8262)
Update dlpack from `0.3` to `>=0.5,<0.6.0a0`, and fix the breaking changes introduced in `0.4`.

closes #7679
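
The breaking changes being fixed are DLPack's renames, visible throughout the diff below: the `DLTensor` member `ctx` becomes `device`, and the device types `kDLGPU` and `kDLCPUPinned` become `kDLCUDA` and `kDLCUDAHost` (`kDLCPU` is unchanged). A minimal sketch of the new spellings (illustration only, not code from this commit; assumes `dlpack/dlpack.h` from dlpack >= 0.5):

```cpp
#include <dlpack/dlpack.h>  // dlpack >= 0.5

// Illustration: point a DLTensor at CUDA device 0 using the renamed member
// and enumerators; the dlpack 0.3 spellings are shown in the trailing comments.
inline void tag_as_cuda_device_zero(DLTensor& t)
{
  t.device.device_type = kDLCUDA;  // was: t.ctx.device_type = kDLGPU;
  t.device.device_id   = 0;        // was: t.ctx.device_id   = 0;
  // Pinned host memory is now kDLCUDAHost (was kDLCPUPinned).
}
```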

Authors:
  - Christopher Harris (https://github.com/cwharris)

Approvers:
  - Paul Taylor (https://github.com/trxcllnt)
  - Ray Douglass (https://github.com/raydouglass)
  - Keith Kraus (https://github.com/kkraus14)
  - MithunR (https://github.com/mythrocks)
  - GALI PREM SAGAR (https://github.com/galipremsagar)

URL: #8262
cwharris authored May 25, 2021
1 parent 6db757b commit eea8cab
Showing 9 changed files with 26 additions and 26 deletions.
2 changes: 1 addition & 1 deletion conda/environments/cudf_dev_cuda11.0.yml
@@ -44,7 +44,7 @@ dependencies:
  - dask==2021.4.0
  - distributed>=2.22.0,<=2021.4.0
  - streamz
- - dlpack==0.3
+ - dlpack>=0.5,<0.6.0a0
  - arrow-cpp=1.0.1
  - arrow-cpp-proc * cuda
  - double-conversion
2 changes: 1 addition & 1 deletion conda/environments/cudf_dev_cuda11.2.yml
@@ -44,7 +44,7 @@ dependencies:
  - dask==2021.4.0
  - distributed>=2.22.0,<=2021.4.0
  - streamz
- - dlpack==0.3
+ - dlpack>=0.5,<0.6.0a0
  - arrow-cpp=1.0.1
  - arrow-cpp-proc * cuda
  - double-conversion
2 changes: 1 addition & 1 deletion conda/recipes/cudf/meta.yaml
@@ -29,7 +29,7 @@ requirements:
  - cython >=0.29,<0.30
  - setuptools
  - numba >=0.53.1
- - dlpack 0.3
+ - dlpack>=0.5,<0.6.0a0
  - pyarrow 1.0.1
  - libcudf {{ version }}
  - rmm {{ minor_version }}
2 changes: 1 addition & 1 deletion conda/recipes/libcudf/meta.yaml
@@ -39,7 +39,7 @@ requirements:
  - cudatoolkit {{ cuda_version }}.*
  - arrow-cpp 1.0.1
  - arrow-cpp-proc * cuda
- - dlpack 0.3
+ - dlpack>=0.5,<0.6.0a0
  run:
  - {{ pin_compatible('cudatoolkit', max_pin='x.x') }}
  - arrow-cpp-proc * cuda
2 changes: 1 addition & 1 deletion cpp/cmake/thirdparty/CUDF_GetDLPack.cmake
@@ -36,6 +36,6 @@ function(find_and_configure_dlpack VERSION)
  set(DLPACK_INCLUDE_DIR "${dlpack_SOURCE_DIR}/include" PARENT_SCOPE)
  endfunction()

- set(CUDF_MIN_VERSION_dlpack 0.3)
+ set(CUDF_MIN_VERSION_dlpack 0.5)

  find_and_configure_dlpack(${CUDF_MIN_VERSION_dlpack})
4 changes: 2 additions & 2 deletions cpp/include/cudf/interop.hpp
@@ -35,8 +35,8 @@ namespace cudf {
  /**
  * @brief Convert a DLPack DLTensor into a cudf table
  *
- * The `device_type` of the DLTensor must be `kDLGPU`, `kDLCPU`, or
- * `kDLCPUPinned`, and `device_id` must match the current device. The `ndim`
+ * The `device_type` of the DLTensor must be `kDLCPU`, `kDLCuda`, or
+ * `kDLCUDAHost`, and `device_id` must match the current device. The `ndim`
  * must be set to 1 or 2. The `dtype` must have 1 lane and the bitsize must
  * match a supported `cudf::data_type`.
  *
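
To make the documented contract concrete, here is a hypothetical caller-side sketch (not part of this commit): a 1-D, single-lane float32 buffer in host (`kDLCPU`) memory handed to `cudf::from_dlpack`. The helper name and data layout are illustrative; the field and enum names come from `dlpack/dlpack.h` in dlpack >= 0.5.

```cpp
#include <cudf/interop.hpp>
#include <cudf/table/table.hpp>

#include <dlpack/dlpack.h>

#include <cstdint>
#include <memory>

// Hypothetical helper: describe a host float32 buffer with a DLManagedTensor
// that satisfies the contract above (supported device type, ndim == 1, one
// lane) and convert it into a cudf table. from_dlpack copies the data, so the
// stack-allocated descriptor only needs to outlive the call.
std::unique_ptr<cudf::table> host_buffer_to_table(float* data, int64_t size)
{
  int64_t shape[1] = {size};

  DLManagedTensor managed{};
  managed.dl_tensor.data               = data;
  managed.dl_tensor.device.device_type = kDLCPU;   // host memory
  managed.dl_tensor.device.device_id   = 0;
  managed.dl_tensor.ndim               = 1;        // 1 or 2 only
  managed.dl_tensor.dtype.code         = kDLFloat;
  managed.dl_tensor.dtype.bits         = 32;
  managed.dl_tensor.dtype.lanes        = 1;        // must be 1 lane
  managed.dl_tensor.shape              = shape;
  managed.dl_tensor.strides            = nullptr;  // compact layout
  managed.dl_tensor.byte_offset        = 0;

  return cudf::from_dlpack(&managed);
}
```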
2 changes: 1 addition & 1 deletion (libcudf_kafka thirdparty CMake: minimum required cudf version)
@@ -28,7 +28,7 @@ function(find_and_configure_cudf VERSION)
  endif()
  endfunction()

- set(CUDA_KAFKA_MIN_VERSION_cudf "${CUDA_KAFKA_VERSION_MAJOR}.${CUDA_KAFKA_VERSION_MINOR}")
+ set(CUDA_KAFKA_MIN_VERSION_cudf "${CUDA_KAFKA_VERSION_MAJOR}.${CUDA_KAFKA_VERSION_MINOR}.00")
  find_and_configure_cudf(${CUDA_KAFKA_MIN_VERSION_cudf})

  if(cudf_ADDED)
14 changes: 7 additions & 7 deletions cpp/src/interop/dlpack.cpp
@@ -137,15 +137,15 @@ std::unique_ptr<table> from_dlpack(DLManagedTensor const* managed_tensor,
  auto const& tensor = managed_tensor->dl_tensor;

  // We can copy from host or device pointers
- CUDF_EXPECTS(kDLGPU == tensor.ctx.device_type || kDLCPU == tensor.ctx.device_type ||
- kDLCPUPinned == tensor.ctx.device_type,
- "DLTensor must be GPU, CPU, or pinned type");
+ CUDF_EXPECTS(tensor.device.device_type == kDLCPU || tensor.device.device_type == kDLCUDA ||
+ tensor.device.device_type == kDLCUDAHost,
+ "DLTensor device type must be CPU, CUDA or CUDAHost");

  // Make sure the current device ID matches the Tensor's device ID
- if (tensor.ctx.device_type != kDLCPU) {
+ if (tensor.device.device_type != kDLCPU) {
  int device_id = 0;
  CUDA_TRY(cudaGetDevice(&device_id));
- CUDF_EXPECTS(tensor.ctx.device_id == device_id, "DLTensor device ID must be current device");
+ CUDF_EXPECTS(tensor.device.device_id == device_id, "DLTensor device ID must be current device");
  }

  // Currently only 1D and 2D tensors are supported

@@ -234,8 +234,8 @@ DLManagedTensor* to_dlpack(table_view const& input,
  tensor.strides[1] = num_rows;
  }

- CUDA_TRY(cudaGetDevice(&tensor.ctx.device_id));
- tensor.ctx.device_type = kDLGPU;
+ CUDA_TRY(cudaGetDevice(&tensor.device.device_id));
+ tensor.device.device_type = kDLCUDA;

  // If there is only one column, then a 1D tensor can just copy the pointer
  // to the data in the column, and the deleter should not delete the original
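
On the export side, `to_dlpack` now stamps its result with `device.device_type = kDLCUDA` rather than `ctx.device_type = kDLGPU`. A hypothetical consumer sketch (not from this commit; it assumes only the `cudf::to_dlpack` signature shown above and the usual DLPack convention that the consumer releases the tensor through its `deleter`):

```cpp
#include <cudf/interop.hpp>
#include <cudf/table/table_view.hpp>

#include <dlpack/dlpack.h>

#include <cassert>

// Hypothetical consumer: export a cudf table, inspect the device with the
// post-rename field names, then release the tensor through its deleter.
void export_and_release(cudf::table_view const& view)
{
  DLManagedTensor* managed = cudf::to_dlpack(view);

  assert(managed->dl_tensor.device.device_type == kDLCUDA);  // was kDLGPU

  if (managed->deleter != nullptr) { managed->deleter(managed); }
}
```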
22 changes: 11 additions & 11 deletions cpp/tests/interop/dlpack_test.cpp
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -111,7 +111,7 @@ TEST_F(DLPackUntypedTests, UnsupportedDeviceTypeFromDlpack)
  unique_managed_tensor tensor(cudf::to_dlpack(input));

  // Spoof an unsupported device type
- tensor->dl_tensor.ctx.device_type = kDLOpenCL;
+ tensor->dl_tensor.device.device_type = kDLOpenCL;
  EXPECT_THROW(cudf::from_dlpack(tensor.get()), cudf::logic_error);
  }

@@ -122,7 +122,7 @@ TEST_F(DLPackUntypedTests, InvalidDeviceIdFromDlpack)
  unique_managed_tensor tensor(cudf::to_dlpack(input));

  // Spoof the wrong device ID
- tensor->dl_tensor.ctx.device_id += 1;
+ tensor->dl_tensor.device.device_id += 1;
  EXPECT_THROW(cudf::from_dlpack(tensor.get()), cudf::logic_error);
  }

@@ -242,7 +242,7 @@ TYPED_TEST(DLPackNumericTests, ToDlpack1D)

  auto const& tensor = result->dl_tensor;
  validate_dtype<TypeParam>(tensor.dtype);
- EXPECT_EQ(kDLGPU, tensor.ctx.device_type);
+ EXPECT_EQ(kDLCUDA, tensor.device.device_type);
  EXPECT_EQ(1, tensor.ndim);
  EXPECT_EQ(uint64_t{0}, tensor.byte_offset);
  EXPECT_EQ(nullptr, tensor.strides);

@@ -275,7 +275,7 @@ TYPED_TEST(DLPackNumericTests, ToDlpack2D)

  auto const& tensor = result->dl_tensor;
  validate_dtype<TypeParam>(tensor.dtype);
- EXPECT_EQ(kDLGPU, tensor.ctx.device_type);
+ EXPECT_EQ(kDLCUDA, tensor.device.device_type);
  EXPECT_EQ(2, tensor.ndim);
  EXPECT_EQ(uint64_t{0}, tensor.byte_offset);

@@ -341,12 +341,12 @@ TYPED_TEST(DLPackNumericTests, FromDlpackCpu)
  int64_t strides[2] = {1, 5};

  DLManagedTensor tensor{};
- tensor.dl_tensor.ctx.device_type = kDLCPU;
- tensor.dl_tensor.dtype = get_dtype<T>();
- tensor.dl_tensor.ndim = 2;
- tensor.dl_tensor.byte_offset = offset;
- tensor.dl_tensor.shape = shape;
- tensor.dl_tensor.strides = strides;
+ tensor.dl_tensor.device.device_type = kDLCPU;
+ tensor.dl_tensor.dtype = get_dtype<T>();
+ tensor.dl_tensor.ndim = 2;
+ tensor.dl_tensor.byte_offset = offset;
+ tensor.dl_tensor.shape = shape;
+ tensor.dl_tensor.strides = strides;

  thrust::host_vector<T> host_vector(data.begin(), data.end());
  tensor.dl_tensor.data = host_vector.data();
