
Commit

WIP: fix cupy->numpy transfers
Holger Kohr committed Nov 23, 2017
1 parent dd2b7c4 commit e1ce7fb
Showing 1 changed file with 101 additions and 39 deletions.
140 changes: 101 additions & 39 deletions odl/space/cupy_tensors.py
@@ -16,12 +16,13 @@
import numpy as np
import warnings

from odl.set import RealNumbers
from odl.set import RealNumbers, ComplexNumbers
from odl.space.base_tensors import TensorSpace, Tensor
from odl.space.weighting import (
Weighting, ArrayWeighting, ConstWeighting,
CustomInner, CustomNorm, CustomDist)
from odl.util import dtype_str, is_floating_dtype, signature_string
from odl.util import (
array_str, dtype_str, is_floating_dtype, signature_string, indent)

try:
import cupy
@@ -599,13 +600,14 @@ def __eq__(self, other):
>>> same_space == space
True
"""
return (super().__eq__(other) and
return (super(CupyTensorSpace, self).__eq__(other) and
self.device == other.device and
self.weighting == other.weighting)

def __hash__(self):
"""Return ``hash(self)``."""
return hash((super().__hash__(), self.device, self.weighting))
return hash((super(CupyTensorSpace, self).__hash__(), self.device,
self.weighting))

def _lincomb(self, a, x1, b, x2, out):
"""Linear combination of ``x1`` and ``x2``.
@@ -874,12 +876,16 @@ def default_dtype(field=None):
dtype : `numpy.dtype`
Numpy data type specifier. The returned defaults are:
``RealNumbers()`` : ``np.dtype('float64')``
- ``RealNumbers()`` or ``None`` : ``np.dtype('float64')``
- ``ComplexNumbers()`` : ``np.dtype('complex128')``
``ComplexNumbers()`` : not supported
These choices correspond to the defaults of the ``cupy``
library.
"""
if field is None or field == RealNumbers():
return np.dtype('float64')
elif field == ComplexNumbers():
return np.dtype('complex128')
else:
raise ValueError('no default data type defined for field {}.'
''.format(field))
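A minimal usage sketch of the extended defaults, assuming, as in the Numpy backend, that ``default_dtype`` is a static method of ``CupyTensorSpace`` and that ``cupy`` is importable:

import numpy as np
from odl.set import RealNumbers, ComplexNumbers
from odl.space.cupy_tensors import CupyTensorSpace

assert CupyTensorSpace.default_dtype() == np.dtype('float64')
assert CupyTensorSpace.default_dtype(RealNumbers()) == np.dtype('float64')
assert CupyTensorSpace.default_dtype(ComplexNumbers()) == np.dtype('complex128')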
@@ -1029,7 +1035,7 @@ def copy(self):
Returns
-------
copy : `pygpu._array.ndgpuarray`
copy : `CupyTensor`
A deep copy.
Examples
@@ -1056,7 +1062,7 @@ def __getitem__(self, indices):
Returns
-------
values : scalar or `pygpu._array.ndgpuarray`
values : scalar or `cupy.core.core.ndarray`
The value(s) at the index (indices).
Examples
@@ -1107,11 +1113,11 @@ def __getitem__(self, indices):
arr = self.data[indices]
if arr.shape == ():
if arr.dtype.kind == 'f':
return float(np.asarray(arr))
return float(cupy.asnumpy(arr))
elif arr.dtype.kind == 'c':
return complex(np.asarray(arr))
return complex(cupy.asnumpy(arr))
elif arr.dtype.kind in ('u', 'i'):
return int(np.asarray(arr))
return int(cupy.asnumpy(arr))
else:
raise RuntimeError("no conversion for dtype {}"
"".format(arr.dtype))
@@ -1280,12 +1286,22 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
for further details. See also the `general documentation on
Numpy ufuncs`_.
.. note::
This implementation looks for native ufuncs in ``pygpu.ufuncs``
and falls back to the basic implementation with Numpy arrays
in case no native ufunc is available. That fallback version
comes with significant overhead due to data copies between
host and device.
.. warning::
Apart from ``'__call__'`` (invoked by, e.g., ``np.add(x, y)``),
CuPy has no native implementation of ufunc methods like
``'reduce'`` or ``'accumulate'``. We manually implement the
following mappings (covering most use cases):
- ``np.add.reduce`` -> ``cupy.sum``
- ``np.add.accumulate`` -> ``cupy.cumsum``
- ``np.multiply.reduce`` -> ``cupy.prod``
- ``np.multiply.accumulate`` -> ``cupy.cumprod``.
**All other such methods will run Numpy code and be slow**!
Please consult the `CuPy documentation on ufuncs
<https://docs-cupy.chainer.org/en/stable/reference/ufunc.html>`_
to check the current state of the library.
.. note::
When an ``out`` parameter is specified, and (one of) it has
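To make the warning above concrete, a short usage sketch follows. The space ``r3`` and its element ``x`` are hypothetical placeholders for any ``CupyTensorSpace`` element; only the four listed mappings stay on the GPU:

import numpy as np

x = r3.element([1.0, 2.0, 3.0])

np.add(x, x)               # '__call__': native cupy ufunc, stays on the GPU
np.add.reduce(x)           # mapped to cupy.sum, stays on the GPU
np.multiply.accumulate(x)  # mapped to cupy.cumprod, stays on the GPU
np.add.outer(x, x)         # no mapping: falls back to Numpy (slow host round-trip)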
@@ -1507,10 +1523,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
inp.data if isinstance(inp, type(self)) else inp
for inp in inputs)

# For native ufuncs, we turn non-scalar inputs into cupy arrays,
# as a workaround for https://github.com/cupy/cupy/issues/594
# TODO: remove code when the upstream issue is fixed
if use_native:
# TODO: remove when upstream issue is fixed
# For native ufuncs, we turn non-scalar inputs into cupy arrays,
# as a workaround for https://github.com/cupy/cupy/issues/594
inputs, orig_inputs = [], inputs
for inp in orig_inputs:
if (isinstance(inp, cupy.ndarray) or
@@ -1519,6 +1535,20 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
inputs.append(inp)
else:
inputs.append(cupy.array(inp))
elif method != 'at':
# TODO: remove when upstream issue is fixed
# For non-native ufuncs (except `at`), we need to cast our tensors
# and Cupy arrays to Numpy arrays explicitly, since `__array__`
# and friends are not implemented. See
# https://github.com/cupy/cupy/issues/589
inputs, orig_inputs = [], inputs
for inp in orig_inputs:
if isinstance(inp, cupy.ndarray):
inputs.append(cupy.asnumpy(inp))
elif isinstance(inp, CupyTensor):
inputs.append(cupy.asnumpy(inp.data))
else:
inputs.append(inp)
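Both branches above boil down to explicit conversions: the native path promotes array-likes to ``cupy`` arrays (cupy/cupy#594), the fallback path copies device data to the host (cupy/cupy#589). A compact sketch of the two directions, with all names purely illustrative:

import cupy
import numpy as np

to_device = cupy.array([1.0, 2.0, 3.0])   # native path: array-like -> cupy array
to_host = cupy.asnumpy(to_device)         # fallback path: explicit device -> host copy
assert isinstance(to_host, np.ndarray)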

# --- Get some parameters for later --- #

@@ -1595,20 +1625,19 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
def eval_at_via_npy(*inputs, **kwargs):
import ctypes
cupy_arr = inputs[0]
npy_arr = np.asarray(cupy_arr)
npy_arr = cupy.asnumpy(cupy_arr)
new_inputs = (npy_arr,) + inputs[1:]
super(CupyTensor, self).__array_ufunc__(
ufunc, method, *new_inputs, **kwargs)
# Workaround for https://github.com/cupy/cupy/issues/593
# TODO: use cupy_arr[:] = npy_arr when it's fixed and not
# slower
# TODO: use cupy_arr[:] = npy_arr when available
cupy_arr.data.copy_from_host(
npy_arr.ctypes.data_as(ctypes.c_void_p), npy_arr.nbytes)
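Since ``cupy_arr[:] = npy_arr`` is not usable yet (cupy/cupy#593), the helper above writes the Numpy result back through the raw device pointer. A standalone sketch of that write-back pattern, assuming a CUDA device is available:

import ctypes
import numpy as np
import cupy

d_arr = cupy.zeros(4, dtype='float64')   # destination array on the device
h_arr = np.arange(4, dtype='float64')    # host data of matching size and dtype
d_arr.data.copy_from_host(
    h_arr.ctypes.data_as(ctypes.c_void_p), h_arr.nbytes)
assert np.array_equal(cupy.asnumpy(d_arr), h_arr)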

if use_native:
# Native method could exist but raise `NotImplementedError`
# or return `NotImplemented`, falling back to Numpy case
# then, too
# or return `NotImplemented`. We fall back to Numpy also in
# that situation.
try:
res = native_method(*inputs, **kwargs)
except NotImplementedError:
@@ -1626,8 +1655,8 @@ def eval_at_via_npy(*inputs, **kwargs):

if use_native:
# Native method could exist but raise `NotImplementedError`
# or return `NotImplemented`, falling back to base case
# then, too
# or return `NotImplemented`. We fall back to Numpy also in
# that situation.
try:
res = native_method(*inputs, **kwargs)
except NotImplementedError:
@@ -1653,8 +1682,8 @@ def eval_at_via_npy(*inputs, **kwargs):
if is_floating_dtype(res.dtype):
if res.shape != self.shape:
# Don't propagate weighting if shape changes
weighting = CupyTensorSpaceConstWeighting(1.0,
exponent)
weighting = CupyTensorSpaceConstWeighting(
1.0, exponent)
spc_kwargs = {'weighting': weighting}
else:
spc_kwargs = {}
@@ -1723,8 +1752,10 @@ def real(self):
real : `CupyTensor` view with real dtype
The real part of this tensor as an element of an `rn` space.
"""
# Only real dtypes currently
return self
if self.space.is_real:
return self
else:
return self.space.real_space.element(self.data.real)

@real.setter
def real(self, newreal):
@@ -1737,7 +1768,7 @@ def real(self, newreal):
newreal : `array-like` or scalar
The new real part for this tensor.
"""
self.real.data[:] = newreal
self.data.real[:] = newreal

@property
def imag(self):
@@ -1748,8 +1779,10 @@ def imag(self):
imag : `CupyTensor`
The imaginary part of this tensor as an element of an `rn` space.
"""
# Only real dtypes currently
return self.space.zero()
if self.space.is_real:
return self.space.zero()
else:
return self.space.real_space.element(self.data.imag)

@imag.setter
def imag(self, newimag):
@@ -1762,7 +1795,7 @@ def imag(self, newimag):
newimag : `array-like` or scalar
The new imaginary part for this tensor.
"""
raise NotImplementedError('complex dtypes not supported')
self.data.imag[:] = newimag

def conj(self, out=None):
"""Complex conjugate of this tensor.
@@ -1779,11 +1812,17 @@ def conj(self, out=None):
The complex conjugate tensor. If ``out`` was provided,
the returned object is a reference to it.
"""
# Only real dtypes currently
if out is None:
return self.copy()
if self.space.is_real:
return self.copy()
else:
return self.space.element(self.data.conj())
else:
self.assign(out)
if self.space.is_real:
self.assign(out)
else:
# In-place conjugation does not seem to be available
out[:] = self.data.conj()
return out
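With complex dtypes now handled, ``real``, ``imag`` and ``conj`` mirror their Numpy counterparts. A rough sketch of the intended usage; ``odl.cn(3, impl='cupy')`` is assumed to be registered by this branch and is illustrative only:

import odl

space = odl.cn(3, impl='cupy')
x = space.element([1 + 1j, 2, 3 - 2j])

x.real                  # element of the corresponding real space
x.imag                  # likewise
y = x.conj()            # new element with conjugated entries
x.imag = 0              # setter writes into x.data.imag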

def __ipow__(self, other):
@@ -1806,7 +1845,8 @@ def _weighting(weights, exponent):
if np.isscalar(weights):
weighting = CupyTensorSpaceConstWeighting(weights, exponent=exponent)
else:
# TODO: sequence of 1D array-likes
# TODO: sequence of 1D array-likes, see
# https://github.com/odlgroup/odl/pull/1238
weights = cupy.array(weights, copy=False)
weighting = CupyTensorSpaceArrayWeighting(weights, exponent=exponent)
return weighting
@@ -2065,6 +2105,28 @@ def dist(self, x1, x2):
else:
return float(distpw(x1.data, x2.data, self.exponent, self.array))

# TODO: remove repr_part and __repr__ when cupy.ndarray.__array__
# is implemented. See
# https://github.com/cupy/cupy/issues/589
@property
def repr_part(self):
"""String usable in a space's ``__repr__`` method."""
# TODO: use edgeitems
arr_str = array_str(cupy.asnumpy(self.array), nprint=10)
optargs = [('weighting', arr_str, ''),
('exponent', self.exponent, 2.0)]
return signature_string([], optargs, sep=',\n',
mod=[[], ['!s', ':.4']])

def __repr__(self):
"""Return ``repr(self)``."""
# TODO: use edgeitems
posargs = [array_str(cupy.asnumpy(self.array), nprint=10)]
optargs = [('exponent', self.exponent, 2.0)]
inner_str = signature_string(posargs, optargs, sep=',\n',
mod=['!s', ':.4'])
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))


class CupyTensorSpaceConstWeighting(ConstWeighting):

