Improvements for __cuda_array_interface__ tests (#15188)

This PR contains a few minor improvements for `__cuda_array_interface__` and its tests. Found while working on #15111.
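
For context: `__cuda_array_interface__` implements the CUDA Array Interface, a protocol in which a GPU object exposes a dict describing its device buffer (`shape`, `typestr`, a `data` pointer, and an optional `mask`) so that libraries such as CuPy, Numba, and cudf can share device memory without copies. A minimal sketch of producing and consuming the interface, assuming a CUDA-capable environment:

```python
import cupy

import cudf

s = cudf.Series([1.0, 2.0, 3.0])
cai = s.__cuda_array_interface__
# A dict roughly like:
# {'shape': (3,), 'typestr': '<f8', 'data': (<device ptr>, False), 'version': ...}

arr = cupy.asarray(s)  # zero-copy: CuPy reads the interface dict
assert arr.shape == (3,)
```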

Authors:
  - Bradley Dice (https://github.com/bdice)

Approvers:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

URL: #15188
bdice authored Mar 5, 2024
1 parent 8d073e4 commit 1f5fcf6
Showing 2 changed files with 17 additions and 8 deletions.
5 changes: 4 additions & 1 deletion python/cudf/cudf/core/single_column_frame.py
@@ -242,7 +242,10 @@ def __cuda_array_interface__(self):
         try:
             return self._column.__cuda_array_interface__
         except NotImplementedError:
-            raise AttributeError
+            raise AttributeError(
+                f"'{type(self).__name__}' object has no attribute "
+                "'__cuda_array_interface__'"
+            )
 
     @_cudf_nvtx_annotate
     def factorize(self, sort=False, use_na_sentinel=True):
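
Because the property raises `AttributeError` when the underlying column type cannot expose a device array (the dtypes that raise `NotImplementedError`), `hasattr` checks on such objects return `False`; the change above only makes the message descriptive. A small illustration, assuming a string Series, which does not expose a flat device buffer:

```python
import cudf

s = cudf.Series(["a", "b"])  # no flat device buffer for string data
print(hasattr(s, "__cuda_array_interface__"))  # False: the property raises
# s.__cuda_array_interface__ would raise:
# AttributeError: 'Series' object has no attribute '__cuda_array_interface__'
```
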
20 changes: 13 additions & 7 deletions python/cudf/cudf/tests/test_cuda_array_interface.py
@@ -4,10 +4,10 @@
 from contextlib import ExitStack as does_not_raise
 
 import cupy
+import numba.cuda
 import numpy as np
 import pandas as pd
 import pytest
-from numba import cuda
 
 import cudf
 from cudf.core.buffer.spill_manager import get_global_manager
@@ -25,7 +25,7 @@ def test_cuda_array_interface_interop_in(dtype, module):
     if dtype in DATETIME_TYPES:
         expectation = pytest.raises(ValueError)
     elif module == "numba":
-        module_constructor = cuda.to_device
+        module_constructor = numba.cuda.to_device
 
     with expectation:
         module_data = module_constructor(np_data)
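
This test builds device inputs with `numba.cuda.to_device`, which copies a host array to the GPU, and hands the result to cudf, which ingests it through the interface. A minimal sketch of that round trip:

```python
import numba.cuda
import numpy as np

import cudf

np_data = np.arange(10, dtype="float64")
module_data = numba.cuda.to_device(np_data)  # host -> device copy (DeviceNDArray)
gs = cudf.Series(module_data)                # ingested via __cuda_array_interface__
assert (gs.to_numpy() == np_data).all()
```
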
@@ -55,7 +55,7 @@ def to_host_function(x):
         return cupy.asnumpy(x)
 
     elif module == "numba":
-        module_constructor = cuda.as_cuda_array
+        module_constructor = numba.cuda.as_cuda_array
 
         def to_host_function(x):
             return x.copy_to_host()
@@ -89,7 +89,7 @@ def to_host_function(x):
 
     elif module == "numba":
         expectation = pytest.raises(NotImplementedError)
-        module_constructor = cuda.as_cuda_array
+        module_constructor = numba.cuda.as_cuda_array
 
         def to_host_function(x):
             return x.copy_to_host()
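
Unlike `to_device`, `numba.cuda.as_cuda_array` wraps an existing device buffer without copying, which is why these branches pair it with `copy_to_host()` to read results back. A short sketch:

```python
import cupy
import numba.cuda

cp_data = cupy.arange(5)
view = numba.cuda.as_cuda_array(cp_data)  # zero-copy view over CuPy's buffer
host = view.copy_to_host()                # explicit device -> host copy
assert (host == cupy.asnumpy(cp_data)).all()
```
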
@@ -135,9 +135,11 @@ def test_cuda_array_interface_as_column(dtype, nulls, mask_type):
 
     if mask_type == "bools":
         if nulls == "some":
-            obj.__cuda_array_interface__["mask"] = cuda.to_device(mask)
+            obj.__cuda_array_interface__["mask"] = numba.cuda.to_device(mask)
         elif nulls == "all":
-            obj.__cuda_array_interface__["mask"] = cuda.to_device([False] * 10)
+            obj.__cuda_array_interface__["mask"] = numba.cuda.to_device(
+                [False] * 10
+            )
 
     expect = sr
     got = cudf.Series(obj)
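
Per the interface specification, the `mask` entry is itself an object exposing `__cuda_array_interface__` over boolean (`'|b1'`) data, and rows whose mask value is `False` become nulls when cudf ingests the array. A hedged sketch mirroring the test's pattern; the `Wrapper` class is hypothetical:

```python
import numba.cuda
import numpy as np

import cudf

class Wrapper:
    # Hypothetical helper: copy a device array's interface dict and attach a mask.
    def __init__(self, data, mask):
        cai = dict(data.__cuda_array_interface__)
        cai["mask"] = mask  # validity mask: False marks a null row
        self.__cuda_array_interface__ = cai

data = numba.cuda.to_device(np.arange(4, dtype="float64"))
mask = numba.cuda.to_device(np.array([True, False, True, True]))
sr = cudf.Series(Wrapper(data, mask))  # row 1 becomes <NA>
```
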
@@ -193,7 +195,11 @@ def test_cuda_array_interface_pytorch():
 
     assert_eq(got, cudf.Series(buffer, dtype=np.bool_))
 
-    # TODO: This test fails with PyTorch 2. Is it still expected to be valid?
+    # TODO: This test fails with PyTorch 2. It appears that PyTorch
+    # checks that the pointer is device-accessible even when the
+    # size is zero. See
+    # https://github.com/pytorch/pytorch/issues/98133
+    #
     # index = cudf.Index([], dtype="float64")
     # tensor = torch.tensor(index)
     # got = cudf.Index(tensor)
