ENH: add cupy tensor space #1231

Status: Open. Wants to merge 36 commits into base: master.

36 commits:
d028dd5  ENH: add cupy tensor space (Oct 6, 2017)
dd2b7c4  MAINT: improve import of cupy (Nov 23, 2017)
e1ce7fb  WIP: fix cupy->numpy transfers (Nov 23, 2017)
f282947  WIP: fix custom cupy kernels (Nov 23, 2017)
b84ee8d  MAINT: minor fixes (Nov 23, 2017)
4a0c67c  WIP: implement cupy space unit tests (Nov 23, 2017)
1d431c6  WIP: fix cublas stuff (Nov 24, 2017)
2fdebff  WIP: fix cublas, weight propagation, implement (add,multiply).(reduce… (Nov 25, 2017)
3bdc3f0  TST: ignore cupy_tensors in doctests if cupy is not available (Nov 25, 2017)
6fbcc81  MAINT: fix various numpy and cupy tensor things (Nov 25, 2017)
f6a8022  ENH: allow indexing of CupyTensor with CupyTensor (Nov 25, 2017)
ee386ab  MAINT: minor stuff (Nov 25, 2017)
2b7103c  MAINT: change weighted inf-norm to ignore weight (Nov 25, 2017)
2ed9ae1  ENH: make comparison of cupy arrays in tests faster (Nov 25, 2017)
8414458  WIP: fix tensor tests for cupy (Nov 25, 2017)
7d5bf9d  MAINT: adapt all_almost_equal to cupy (Nov 26, 2017)
9cf8917  WIP: fix cupy tensor tests (Nov 26, 2017)
e47f78a  WIP: fix cupy tests (Nov 27, 2017)
0fc7de3  ENH: make ufuncs and ufunc_ops work with 2 outputs (Nov 27, 2017)
9f24b7b  MAINT: make weighting methods numpy-independent (Nov 27, 2017)
1422fc0  WIP: fix cupy tests (Nov 27, 2017)
cbd7b87  WIP: fix cupy stuff (Nov 28, 2017)
cbdfe04  WIP: fix cupy tensor tests (Nov 28, 2017)
2703c3e  WIP: fix pspace noise_array and out aliasing issue (Nov 29, 2017)
fbfba6c  ENH: implement Numpy-style broadcasting in pspace ufuncs (Nov 29, 2017)
71ccc46  WIP: simplify tests and add utils for array modules (Nov 29, 2017)
392762d  ENH: add impl to writable_array (Nov 29, 2017)
cd12e84  ENH: add impl to asarray, tests, and fix real and imag for cupy (Nov 29, 2017)
6a9f733  WIP: add asarray helper, fix diff_ops for cupy (Nov 30, 2017)
dfeee85  MAINT: fix utils (Nov 30, 2017)
66728f2  MAINT: exclude unsupported ufunc operators (Nov 30, 2017)
23149ee  MAINT: fix bad parens in test (Nov 30, 2017)
6efe98c  BUG: fix indexing with cupy tensor (Nov 30, 2017)
daac62e  MAINT: minor fix in diff_ops (Nov 30, 2017)
58b921d  TST: mark cupy-only test as skip if cupy is not available (Dec 1, 2017)
eb10834  MAINT: fix Py2-incompatible doctest in utility (Dec 1, 2017)
86 changes: 45 additions & 41 deletions odl/discr/diff_ops.py
@@ -14,7 +14,8 @@
from odl.discr.lp_discr import DiscreteLp
from odl.operator.tensor_ops import PointwiseTensorFieldOperator
from odl.space import ProductSpace
from odl.util import writable_array, signature_string, indent
from odl.util import (
writable_array, asarray, array_module, signature_string, indent)


__all__ = ('PartialDerivative', 'Gradient', 'Divergence', 'Laplacian')
@@ -137,9 +138,9 @@ def _call(self, x, out=None):
if out is None:
out = self.range.element()

# TODO: this pipes CUDA arrays through NumPy. Write native operator.
with writable_array(out) as out_arr:
finite_diff(x.asarray(), axis=self.axis, dx=self.dx,
impl = self.domain.impl
with writable_array(out, impl=impl) as out_arr:
finite_diff(x, axis=self.axis, dx=self.dx, impl=impl,
method=self.method, pad_mode=self.pad_mode,
pad_const=self.pad_const, out=out_arr)
return out
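
For context: writable_array exposes the element's data as a backend-native array and commits changes back on exit. A minimal sketch of such a context manager, built on the asarray(impl=...) interface added in this PR (illustrative only, not the actual odl.util implementation)::

    import contextlib

    @contextlib.contextmanager
    def writable_array(element, impl='numpy'):
        """Yield a writable array for ``element``, write back on exit."""
        # Assumption: ``asarray(impl=...)`` returns a numpy or cupy array.
        arr = element.asarray(impl=impl)
        try:
            yield arr
        finally:
            # Commit in-place modifications back to the element.
            element[:] = arr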
@@ -190,8 +191,7 @@ def __repr__(self):

def __str__(self):
"""Return ``str(self)``."""
dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
return repr(self)


class Gradient(PointwiseTensorFieldOperator):
@@ -347,16 +347,15 @@ def _call(self, x, out=None):
if out is None:
out = self.range.element()

x_arr = x.asarray()
ndim = self.domain.ndim
dx = self.domain.cell_sides
impl = self.domain.impl

for axis in range(ndim):
with writable_array(out[axis]) as out_arr:
finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method,
pad_mode=self.pad_mode,
pad_const=self.pad_const,
out=out_arr)
with writable_array(out[axis], impl=impl) as out_arr:
finite_diff(x, axis=axis, dx=dx[axis], impl=impl,
method=self.method, pad_mode=self.pad_mode,
pad_const=self.pad_const, out=out_arr)
return out

def derivative(self, point=None):
@@ -414,8 +413,7 @@ def __repr__(self):

def __str__(self):
"""Return ``str(self)``."""
dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
return repr(self)


class Divergence(PointwiseTensorFieldOperator):
@@ -494,10 +492,12 @@ def __init__(self, domain=None, range=None, method='forward',
... [2., 3., 4., 5., 6.]])
>>> f = div.domain.element([data, data])
>>> div_f = div(f)
>>> print(div_f)
[[ 2., 2., 2., 2., -3.],
[ 2., 2., 2., 2., -4.],
[ -1., -2., -3., -4., -12.]]
>>> div_f
uniform_discr([ 0., 0.], [ 3., 5.], (3, 5)).element(
[[ 2., 2., 2., 2., -3.],
[ 2., 2., 2., 2., -4.],
[ -1., -2., -3., -4., -12.]]
)

Verify adjoint:

@@ -559,11 +559,13 @@ def _call(self, x, out=None):

ndim = self.range.ndim
dx = self.range.cell_sides
impl = self.range.impl

tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)
with writable_array(out) as out_arr:
tmp = array_module(impl).empty(
out.shape, out.dtype, order=out.space.default_order)
[Comment: @kohr-h (Member, Author), Dec 1, 2017]
This of course assumes that impl has the same interface as numpy.
Perhaps it's better to implement wrappers for the most common functions, like asarray (already done), empty, and whatever else is needed. That would be more extensible.

[Reply: Member]
That would be how e.g. Keras does it, but I say we leave this for now. Changing should be rather easy (just search replace basically).
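
For illustration, a minimal array_module dispatcher along these lines could look as follows (a sketch; it assumes cupy mirrors the numpy API for the functions used here, e.g. empty, subtract and swapaxes)::

    def array_module(impl):
        """Return the array namespace corresponding to ``impl``."""
        if impl == 'numpy':
            import numpy
            return numpy
        elif impl == 'cupy':
            try:
                import cupy
            except ImportError:
                raise ValueError("impl 'cupy' requires the cupy package")
            return cupy
        else:
            raise ValueError('unknown impl {!r}'.format(impl))

The wrapper approach suggested above would instead expose a small set of explicitly implemented functions, trading generality for a well-defined interface.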

with writable_array(out, impl=impl) as out_arr:
for axis in range(ndim):
finite_diff(x[axis], axis=axis, dx=dx[axis],
finite_diff(x[axis], axis=axis, dx=dx[axis], impl=impl,
method=self.method, pad_mode=self.pad_mode,
pad_const=self.pad_const,
out=tmp)
@@ -623,8 +625,7 @@ def __repr__(self):

def __str__(self):
"""Return ``str(self)``."""
dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
return repr(self)


class Laplacian(PointwiseTensorFieldOperator):
@@ -714,24 +715,23 @@ def _call(self, x, out=None):
else:
out.set_zero()

x_arr = x.asarray()
out_arr = out.asarray()
tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)

ndim = self.domain.ndim
dx = self.domain.cell_sides
impl = self.domain.impl
tmp = array_module(impl).empty(
out.shape, out.dtype, order=out.space.default_order)

with writable_array(out) as out_arr:
with writable_array(out, impl=impl) as out_arr:
for axis in range(ndim):
# TODO: this can be optimized
finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
finite_diff(x, axis=axis, dx=dx[axis] ** 2, impl=impl,
method='forward',
pad_mode=self.pad_mode,
pad_const=self.pad_const, out=tmp)

out_arr += tmp

finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
finite_diff(x, axis=axis, dx=dx[axis] ** 2, impl=impl,
method='backward',
pad_mode=self.pad_mode,
pad_const=self.pad_const, out=tmp)
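
For reference, the forward and backward differences with denominator dx ** 2, combined in the part of the hunk hidden by the fold (presumably out_arr -= tmp), add up to the standard second-difference stencil along each axis: (f[i+1] - f[i]) / dx**2 - (f[i] - f[i-1]) / dx**2 = (f[i+1] - 2*f[i] + f[i-1]) / dx**2.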
@@ -781,11 +781,11 @@ def __repr__(self):

def __str__(self):
"""Return ``str(self)``."""
dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
return repr(self)


def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
def finite_diff(f, axis, dx=1.0, method='forward', out=None, impl='numpy',
**kwargs):
"""Calculate the partial derivative of ``f`` along a given ``axis``.

In the interior of the domain of f, the partial derivative is computed
@@ -819,6 +819,9 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
out : `numpy.ndarray`, optional
An N-dimensional array to which the output is written. Has to have
the same shape as the input array ``f``.
impl : str, optional
Implementation backend for array manipulations. Usually handled by
the calling code.
pad_mode : string, optional
The padding mode to use outside the domain.

@@ -883,7 +886,7 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
>>> out is finite_diff(f, axis=0, out=out)
True
"""
f_arr = np.asarray(f)
f_arr = asarray(f, impl=impl)
ndim = f_arr.ndim

if f_arr.shape[axis] < 2:
@@ -912,8 +915,9 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
pad_const = kwargs.pop('pad_const', 0)
pad_const = f.dtype.type(pad_const)

arrmod = array_module(impl)
if out is None:
out = np.empty_like(f_arr)
out = arrmod.empty_like(f_arr)
else:
if out.shape != f.shape:
raise ValueError('expected output shape {}, got {}'
@@ -931,24 +935,24 @@

# create slice objects: initially all are [:, :, ..., :]

# Swap axes so that the axis of interest is first. This is a O(1)
# Swap axes so that the axis of interest is first. This is an O(1)
# operation and is done to simplify the code below.
out, out_in = np.swapaxes(out, 0, axis), out
f_arr = np.swapaxes(f_arr, 0, axis)
out, out_in = arrmod.swapaxes(out, 0, axis), out
f_arr = arrmod.swapaxes(f_arr, 0, axis)

# Interior of the domain of f
if method == 'central':
# 1D equivalent: out[1:-1] = (f[2:] - f[:-2])/2.0
np.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1])
arrmod.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1])
out[1:-1] /= 2.0

elif method == 'forward':
# 1D equivalent: out[1:-1] = (f[2:] - f[1:-1])
np.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1])
arrmod.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1])

elif method == 'backward':
# 1D equivalent: out[1:-1] = (f[1:-1] - f[:-2])
np.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1])
arrmod.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1])

# Boundaries
if pad_mode == 'constant':
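A quick usage sketch of the extended signature (the cupy variant assumes cupy is installed; the interior values follow from the central-difference formula in the comments above)::

    import numpy as np
    from odl.discr.diff_ops import finite_diff

    f = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
    df = finite_diff(f, axis=0, dx=1.0, method='central', impl='numpy')
    # Interior entries: (f[2:] - f[:-2]) / 2 = [2., 4., 6.]

    # With cupy available, the same call runs on the GPU:
    # import cupy as cp
    # df_gpu = finite_diff(cp.asarray(f), axis=0, dx=1.0, method='central',
    #                      impl='cupy')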
16 changes: 9 additions & 7 deletions odl/discr/discretization.py
@@ -331,16 +331,18 @@ def copy(self):
"""Create an identical (deep) copy of this element."""
return self.space.element(self.tensor.copy())

def asarray(self, out=None):
"""Extract the data of this array as a numpy array.
def asarray(self, out=None, impl='numpy'):
"""Extract the data of this element as an array.

Parameters
----------
out : `numpy.ndarray`, optional
Array in which the result should be written in-place.
Has to be contiguous and of the correct dtype.
out : ndarray, optional
Array into which the result should be written. Must be contiguous
and of the correct dtype.
impl : str, optional
Array backend for the output, used when ``out`` is not given.
[Comment: Member Author]
Should probably mention where the valid choices come from.

"""
return self.tensor.asarray(out=out)
return self.tensor.asarray(out=out, impl=impl)

def astype(self, dtype):
"""Return a copy of this element with new ``dtype``.
@@ -409,7 +411,7 @@ def __setitem__(self, indices, values):
indices = indices.tensor
if isinstance(values, type(self)):
values = values.tensor
self.tensor.__setitem__(indices, values)
self.tensor[indices] = values

def sampling(self, ufunc, **kwargs):
"""Sample a continuous function and assign to this element.
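Usage-wise, the extended asarray lets the same element be extracted into either backend (a sketch; the commented line assumes cupy and the new cupy tensor machinery are available)::

    import odl

    space = odl.uniform_discr(0, 1, 5)   # numpy-backed by default
    x = space.element([1, 2, 3, 4, 5])
    x_np = x.asarray()                   # numpy.ndarray, as before
    # x_cp = x.asarray(impl='cupy')      # cupy.ndarray on the GPU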
23 changes: 0 additions & 23 deletions odl/discr/lp_discr.py
@@ -670,29 +670,6 @@ def conj(self, out=None):
self.tensor.conj(out=out.tensor)
return out

def __setitem__(self, indices, values):
"""Set values of this element.

Parameters
----------
indices : int or `slice`
The position(s) that should be set
values : scalar or `array-like`
Value(s) to be assigned.
If ``indices`` is an integer, ``values`` must be a scalar
value.
If ``indices`` is a slice, ``values`` must be
broadcastable to the size of the slice (same size,
shape ``(1,)`` or scalar).
For ``indices == slice(None)``, i.e. in the call
``vec[:] = values``, a multi-dimensional array of correct
shape is allowed as ``values``.
"""
if values in self.space:
self.tensor[indices] = values.tensor
else:
super(DiscreteLpElement, self).__setitem__(indices, values)

def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""Interface to Numpy's ufunc machinery.

8 changes: 4 additions & 4 deletions odl/set/space.py
@@ -374,10 +374,10 @@ def __pow__(self, shape):
>>> r2 ** 4
ProductSpace(rn(2), 4)

Multiple powers work as expected:
Multiple powers work as expected (first entry is outermost power):

>>> r2 ** (4, 2)
ProductSpace(ProductSpace(rn(2), 4), 2)
>>> r2 ** (3, 4)
ProductSpace(ProductSpace(rn(2), 4), 3)
"""
from odl.space import ProductSpace

@@ -387,7 +387,7 @@ def __pow__(self, shape):
shape = tuple(shape)

pspace = self
for n in shape:
for n in reversed(shape):
[Comment: Member]
Why this? Wouldn't our users expect r2 ** (3, 4) == (r2 ** 3) ** 4?

[Reply: Member Author]
I don't think so. When doing space[i], the index slices along the outermost power, i.e., for space ** (3, 4) you expect the valid indices i to be 0, 1, 2, no? It's the same logic as (R^m)^n == R^(n x m).
In your example, I would expect that r2 ** (3, 4) has shape (3, 4, 2).

[Reply: Member]
I concur with your statement. It certainly makes sense.

[Reply: Member Author]
This was an actual bug.

pspace = ProductSpace(pspace, n)

return pspace
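To make the agreed convention concrete (doctest-style sketch; the reprs follow the updated example above, and the indexing behavior follows the discussion)::

    >>> import odl
    >>> pspace = odl.rn(2) ** (3, 4)
    >>> pspace
    ProductSpace(ProductSpace(rn(2), 4), 3)
    >>> len(pspace)  # outermost power first: valid indices are 0, 1, 2
    3
    >>> pspace[0]
    ProductSpace(rn(2), 4)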
3 changes: 3 additions & 0 deletions odl/space/__init__.py
@@ -19,6 +19,9 @@
from .npy_tensors import *
__all__ += npy_tensors.__all__

from .cupy_tensors import *
__all__ += cupy_tensors.__all__

from .pspace import *
__all__ += pspace.__all__

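Given the commits that skip cupy doctests and tests when cupy is missing, cupy_tensors presumably guards its import. A common pattern for such an optional dependency (a sketch, not necessarily the exact code in cupy_tensors.py)::

    try:
        import cupy
        CUPY_AVAILABLE = True
    except ImportError:
        cupy = None
        CUPY_AVAILABLE = False

    # Space classes can then be exported unconditionally and raise a
    # helpful error at construction time if cupy is not installed.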
27 changes: 6 additions & 21 deletions odl/space/base_tensors.py
@@ -9,7 +9,6 @@
"""Base classes for implementations of tensor spaces."""

from __future__ import print_function, division, absolute_import
from builtins import object
from numbers import Integral
import numpy as np

@@ -18,7 +17,8 @@
from odl.util import (
is_numeric_dtype, is_real_dtype, is_floating_dtype,
is_real_floating_dtype, is_complex_floating_dtype, safe_int_conv,
array_str, dtype_str, signature_string, indent, writable_array)
array_str, dtype_str, signature_string, indent, writable_array,
none_context)
from odl.util.ufuncs import TensorSpaceUfuncs
from odl.util.utility import TYPE_MAP_R2C, TYPE_MAP_C2R

@@ -814,26 +814,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):

# --- Evaluate ufunc --- #

# Trivial context used to create a single code path for the ufunc
# evaluation. For `None` output parameter(s), this is used instead of
# `writable_array`.
class CtxNone(object):
"""Trivial context manager class.

When used as ::

with CtxNone() as obj:
# do stuff with `obj`

the returned ``obj`` is ``None``.
"""
__enter__ = __exit__ = lambda *_: None

if method == '__call__':
if ufunc.nout == 1:
# Make context for output (trivial one returns `None`)
if out is None:
out_ctx = CtxNone()
out_ctx = none_context()
else:
out_ctx = writable_array(out, **array_kwargs)

@@ -850,11 +835,11 @@ class CtxNone(object):
if out1 is not None:
out1_ctx = writable_array(out1, **array_kwargs)
else:
out1_ctx = CtxNone()
out1_ctx = none_context()
if out2 is not None:
out2_ctx = writable_array(out2, **array_kwargs)
else:
out2_ctx = CtxNone()
out2_ctx = none_context()

# Evaluate ufunc
with out1_ctx as out1_arr, out2_ctx as out2_arr:
@@ -871,7 +856,7 @@ class CtxNone(object):
else: # method != '__call__'
# Make context for output (trivial one returns `None`)
if out is None:
out_ctx = CtxNone()
out_ctx = none_context()
else:
out_ctx = writable_array(out, **array_kwargs)

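The removed CtxNone class documents the intended behavior: a context manager whose as-target is None. The replacement none_context can be written in a few lines with contextlib (a sketch of what odl.util.none_context plausibly does)::

    import contextlib

    @contextlib.contextmanager
    def none_context():
        """Trivial context manager that yields ``None``."""
        yield None

This keeps a single code path in __array_ufunc__: every output slot gets a context, and ``with ctx as arr`` simply binds arr to None when there is no array to write into.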