From bb31079f7c7d6fcd897ba9e206242dba41dbe845 Mon Sep 17 00:00:00 2001
From: Holger Kohr
Date: Tue, 14 Nov 2017 00:31:22 +0100
Subject: [PATCH] ENH: implement tensor-valued DiscreteLp

Changes in detail:

- Add dtype with shape to DiscreteLp (mostly __repr__, factory functions
  and some downstream methods). As a consequence, `shape_[in,out]` and
  `ndim_[in,out]` are added for the different types of axes, as well as
  `scalar_dtype`.
- Add `PerAxisWeighting` and make it the default for `DiscreteLp` type
  spaces. Reason: this way the `tspace` knows how to deal with removed
  axes etc. This is important for a smooth experience with indexing and
  reductions over axes. Helpers for slicing weightings help structure
  this task.
- Implement `__getitem__` for `TensorSpace` and `DiscreteLp`, including
  (hopefully) reasonable propagation of weights. The new
  `simulate_slicing` utility function simplifies this task.
- Allow indexing with ODL tensors of boolean or integer dtype.
- Implement correct weighting for backprojections with non-uniform
  angles, using per-axis weighting and a new helper `adjoint_weightings`
  to apply the weightings in an efficient way. The correct weighting of
  the range of `RayTransform` is determined by the new
  `proj_space_weighting` helper.
- Change the space `_*_impl` methods to always expect and return Numpy
  arrays, and adapt the calling code.
- Change behavior of `norm` and `dist` to ignore weights for
  `exponent=inf`, in accordance with math.
- Improve speed of `all_equal` for comparison of arrays.
- Account for `None` entries in indices in the
  `normalized_index_expression` helper, thus allowing creation of new
  axes.
- Remove `discr_sequence_space`, it was largely unused and just a
  maintenance burden. Use a regular `uniform_discr` from zero to `shape`
  instead.
- Remove `Weighting.equiv()` methods, never used and hard to maintain
  (n^2 possibilities).
- Remove the (largely useless) `_weighting` helper to create weighting
  instances since it would have been ambiguous with sequences of scalars
  (array or per axis?). Also remove the `npy_weighted_*` functions, they
  were useless, too.
- Remove some dead code from tomo/util.
- A bunch of minor fixes, as usual.

Closes: #908
Closes: #907
Closes: #1113
Closes: #965
Closes: #286
Closes: #267
Closes: #1001
---
 examples/tomo/ray_trafo_parallel_2d.py        |   2 +-
 odl/discr/discr_mappings.py                   |   9 +-
 odl/discr/discretization.py                   |  13 +-
 odl/discr/lp_discr.py                         | 464 ++++++---
 odl/space/base_tensors.py                     |  13 +-
 odl/space/fspace.py                           |  10 +-
 odl/space/npy_tensors.py                      | 888 +++++++++++++-----
 odl/space/weighting.py                        | 359 ++++---
 odl/test/discr/lp_discr_test.py               | 142 ++-
 odl/test/space/tensors_test.py                |  79 +-
 .../trafos/backends/pywt_bindings_test.py     |   8 +-
 odl/test/trafos/fourier_test.py               |  65 +-
 odl/test/util/numerics_test.py                |   9 +-
 odl/tomo/backends/astra_cpu.py                |  29 +-
 odl/tomo/backends/astra_cuda.py               |  31 +-
 odl/tomo/backends/skimage_radon.py            |  35 +-
 odl/tomo/operators/ray_trafo.py               |  95 +-
 odl/tomo/util/utility.py                      |  64 --
 odl/trafos/fourier.py                         |  30 +-
 odl/util/normalize.py                         |  75 +-
 odl/util/numerics.py                          | 166 +++-
 odl/util/testutils.py                         |  14 +-
 22 files changed, 1783 insertions(+), 817 deletions(-)

diff --git a/examples/tomo/ray_trafo_parallel_2d.py b/examples/tomo/ray_trafo_parallel_2d.py
index 0ba70384559..625c77b16d0 100644
--- a/examples/tomo/ray_trafo_parallel_2d.py
+++ b/examples/tomo/ray_trafo_parallel_2d.py
@@ -28,7 +28,7 @@
 # projection data (or any element in the projection space).
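+# The adjoint takes the angle sampling into account through the
+# per-axis weighting of the projection space (cf. the new
+# `adjoint_weightings` helper), so no manual rescaling of the
+# backprojection should be needed here.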
 backproj = ray_trafo.adjoint(proj_data)
 
-# Shows a slice of the phantom, projections, and reconstruction
+# Show a slice of the phantom, projections, and reconstruction
 phantom.show(title='Phantom')
 proj_data.show(title='Projection data (sinogram)')
 backproj.show(title='Back-projected data', force_show=True)
diff --git a/odl/discr/discr_mappings.py b/odl/discr/discr_mappings.py
index 669f3b3c062..c12ddc68bf8 100644
--- a/odl/discr/discr_mappings.py
+++ b/odl/discr/discr_mappings.py
@@ -79,10 +79,11 @@ def __init__(self, map_type, fspace, partition, tspace, linear=False):
                              'of the function set {}'
                              ''.format(partition, fspace.domain, fspace))
 
-        if tspace.shape != partition.shape:
-            raise ValueError('`tspace.shape` not equal to `partition.shape`: '
-                             '{} != {}'
-                             ''.format(tspace.shape, partition.shape))
+        if tspace.shape != fspace.out_shape + partition.shape:
+            raise ValueError(
+                '`tspace.shape` not equal to '
+                '`fspace.out_shape + partition.shape`: {} != {}'
+                ''.format(tspace.shape,
+                          fspace.out_shape + partition.shape))
 
         domain = fspace if map_type == 'sampling' else tspace
         range = tspace if map_type == 'sampling' else fspace
diff --git a/odl/discr/discretization.py b/odl/discr/discretization.py
index fb7915db7d8..0ea2b19ceff 100644
--- a/odl/discr/discretization.py
+++ b/odl/discr/discretization.py
@@ -375,6 +375,13 @@ def __eq__(self, other):
     def __getitem__(self, indices):
         """Return ``self[indices]``.
 
+        .. note::
+            This is a basic implementation that does little more than
+            propagating the indexing to the `tensor` attribute. In
+            particular, the result will **not** be wrapped as a
+            `DiscretizedSpaceElement` again. Subclasses should take
+            care of that by overriding ``__getitem__``.
+
         Parameters
         ----------
         indices : int or `slice`
@@ -417,9 +424,9 @@ def sampling(self, ufunc, **kwargs):
         Parameters
         ----------
         ufunc : ``self.space.fspace`` element
-            The continuous function that should be samplingicted.
+            The continuous function that should be sampled.
         kwargs :
-            Additional arugments for the sampling operator implementation
+            Additional arguments for the sampling operator implementation.
 
         Examples
         --------
@@ -445,7 +452,7 @@ def interpolation(self):
         Returns
         -------
         interpolation_op : `FunctionSpaceMapping`
-            Operatior representing a continuous interpolation of this
+            Operator representing a continuous interpolation of this
             element.
 
Examples diff --git a/odl/discr/lp_discr.py b/odl/discr/lp_discr.py index f475fa88953..27c3699cf52 100644 --- a/odl/discr/lp_discr.py +++ b/odl/discr/lp_discr.py @@ -9,6 +9,7 @@ """Lebesgue L^p type discretizations of function spaces.""" from __future__ import print_function, division, absolute_import +from builtins import int from numbers import Integral import numpy as np @@ -22,17 +23,18 @@ from odl.set import RealNumbers, ComplexNumbers, IntervalProd from odl.space import FunctionSpace, ProductSpace from odl.space.entry_points import tensor_space_impl -from odl.space.weighting import ConstWeighting +from odl.space.weighting import ConstWeighting, PerAxisWeighting from odl.util import ( apply_on_boundary, is_real_dtype, is_complex_floating_dtype, is_string, is_floating_dtype, is_numeric_dtype, dtype_str, array_str, signature_string, indent, npy_printoptions, - normalized_scalar_param_list, safe_int_conv, normalized_nodes_on_bdry) + normalized_scalar_param_list, safe_int_conv, normalized_nodes_on_bdry, + normalized_index_expression, simulate_slicing) __all__ = ('DiscreteLp', 'DiscreteLpElement', 'uniform_discr_frompartition', 'uniform_discr_fromspace', 'uniform_discr_fromintv', 'uniform_discr', - 'uniform_discr_fromdiscr', 'discr_sequence_space') + 'uniform_discr_fromdiscr') _SUPPORTED_INTERP = ('nearest', 'linear') @@ -99,8 +101,8 @@ def __init__(self, fspace, partition, tspace, interp='nearest', **kwargs): else: # Got sequence of strings if len(interp) != partition.ndim: - raise ValueError('expected {} (ndim) entries in `interp`, ' - 'got {}'.format(partition.ndim, len(interp))) + raise ValueError('expected {} entries in `interp`, got {}' + ''.format(partition.ndim, len(interp))) self.__interp_byaxis = tuple(str(s).lower() for s in interp) if any(s not in _SUPPORTED_INTERP for s in self.interp_byaxis): @@ -192,18 +194,74 @@ def grid(self): @property def shape(self): - """Shape of the underlying partition.""" + """Shape of the underlying tensor space. + + For spaces of discretized vector- or tensor-valued functions, + this includes the output components as :: + + shape = fspace.out_shape + partition.shape + """ + return self.tspace.shape + + @property + def shape_in(self): + """Shape of the "input" part, i.e. of the `partition`. + + For spaces of discretized scalar-valued functions, this is the + same as `shape`. For vector- or tensor-valued functions, this + is the shape of *one component*. + """ return self.partition.shape + @property + def ndim_in(self): + """Number of "input" dimensions, equals ``len(self.shape_in)``.""" + return len(self.shape_in) + + @property + def shape_out(self): + """Shape of the "output" part, i.e. of the `fspace`. + + For spaces of discretized vector- or tensor-valued functions, + this is the shape of a function value; for scalar-valued functions, + it is an empty tuple. + """ + return self.fspace.out_shape + + @property + def ndim_out(self): + """Number of "output" dimensions, equals ``len(self.shape_out)``.""" + return len(self.shape_out) + @property def ndim(self): - """Number of dimensions (= number of axes).""" - return self.partition.ndim + """Total number of dimensions, including input and output.""" + return self.ndim_in + self.ndim_out + + @property + def dtype(self): + """Data type of this space. + + For spaces of discretized vector- or tensor-valued functions, + the data type has a shape. For the base data type without shape, + use `scalar_dtype`. 
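+
+        For example (a sketch, using the shaped-dtype factory syntax
+        from the examples in this patch):
+
+        >>> space = odl.uniform_discr(0, 1, 5, dtype=(float, (2, 3)))
+        >>> space.dtype
+        dtype(('<f8', (2, 3)))
+        >>> space.scalar_dtype
+        dtype('float64')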
+ """ + # Construct this way since `fspace` may have dtype `None` + return np.dtype((self.tspace.dtype, self.fspace.out_shape)) + + @property + def scalar_dtype(self): + """Scalar data type of this space. + + For spaces of discretized vector- or tensor-valued functions, + this is the variant of `dtype` without a shape. + """ + return self.tspace.dtype @property def size(self): - """Total number of underlying partition cells.""" - return self.partition.size + """Total number of entries in the underlying tensor space.""" + return self.tspace.size @property def cell_sides(self): @@ -253,10 +311,10 @@ def tangent_bundle(self): This space can be identified with the power space ``X^d`` as used in this implementation. """ - if self.ndim == 0: + if self.ndim_in == 0: return ProductSpace(field=self.field) else: - return ProductSpace(self, self.ndim) + return ProductSpace(self, self.ndim_in) @property def is_uniformly_weighted(self): @@ -423,6 +481,128 @@ def _dist(self, x, y): # TODO: add byaxis_out when discretized tensor-valued functions are # available + def __getitem__(self, indices): + """Return ``self[indices]``. + + For most cases, indexing is implemented such that for an element + ``x in space``, it holds :: + + x[indices] in space[indices] + + Space indexing does not work with index arrays, boolean arrays and + "fancy indexing". + + .. note:: + This method is a default implementation that propagates only + ``shape`` and ``dtype``. Subclasses with more properties + need to override the method. + + Examples + -------- + A single integer slices along the first axis (index does not + matter as long as it lies within the bounds): + + >>> space = odl.uniform_discr([0, 0, 0, 0], [1, 2, 3, 4], + ... shape=(2, 4, 6, 8)) + >>> space[0] + uniform_discr([ 0., 0., 0.], [ 2., 3., 4.], (4, 6, 8)) + >>> space[1] + uniform_discr([ 0., 0., 0.], [ 2., 3., 4.], (4, 6, 8)) + + Multiple indices slice into corresponding axes from the left: + + >>> space[0, 0] + uniform_discr([ 0., 0.], [ 3., 4.], (6, 8)) + >>> space[0, 1, 1] + uniform_discr(0.0, 4.0, 8) + + Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep + one or several axes intact: + + >>> space[0, :, 1, :] + uniform_discr([ 0., 0.], [ 2., 4.], (4, 8)) + >>> space[..., 0, 0] + uniform_discr([ 0., 0.], [ 1., 2.], (2, 4)) + + With slices, parts of axes can be selected: + + >>> space[0, :3, 1:4, :] + uniform_discr([ 0. , 0.5, 0. ], [ 1.5, 2. , 4. ], (3, 3, 8)) + + Indexing is also supported for vector- or tensor-valued function + spaces. In that case, leftmost indices are associated with the + output dimensions, rightmost indices with the input (domain) + dimensions: + + >>> space = odl.uniform_discr(0, 1, 5, dtype=(float, (2, 3))) + >>> space.shape + (2, 3, 5) + >>> space[0] # (2, 3)-valued -> (3,)-valued (first row) + uniform_discr(0.0, 1.0, 5, dtype=('float64', (3,))) + >>> space[:, 0] # (2, 3)-valued -> (2,)-valued (first column) + uniform_discr(0.0, 1.0, 5, dtype=('float64', (2,))) + >>> space[..., :2] # last axis = domain + uniform_discr(0.0, 0.4, 2, dtype=('float64', (2, 3))) + """ + # Unwrap tensor for passing on to underlying indexing methods + if isinstance(indices, self.element_type): + indices = indices.tensor + + if getattr(indices, 'dtype', object) == bool: + # Raising here as signal for `DiscreteLpElement.__getitem__` + # This is an optimization that avoids counting the number of + # `True` entries here *and* in the actual element indexing + # operation. We do it lazily instead. 
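+            # (A sketch of the lazy path: for an element `x`,
+            # `x[x.ufuncs.less(3)]` falls through to tensor indexing in
+            # `DiscreteLpElement.__getitem__` and comes back as a flat
+            # tensor, e.g. in `rn(n)` with `n` the number of `True`s.)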
+ raise TypeError('cannot index space with a boolean element or ' + 'array') + + # The overall logic of the slicing is as follows: + # - indices are normalized to be a tuple of length `ndim` + # - the `tspace` is indexed with the whole index tuple + # - the `partition` is indexed with the "in" part of the tuple + # only, i.e., the last part excluding `n_out = len(shape_out)` + # axes + # - the `out_shape` for the function space is determined using the + # first `n_out` index tuple entries + # - the new function space is constructed from `new_partition.set`, + # `fspace.scalar_out_dtype` and the new `out_shape` + + # Normalize the index expression based on the full shape, and + # split into "in" and "out" parts + indices = normalized_index_expression(indices, self.shape) + # Avoid array comparison with `==` if `indices.contains()` is used + if any(idx is None for idx in indices): + raise ValueError('creating new axes is not supported.') + + indices_out = indices[:len(self.shape_out)] + indices_in = indices[len(self.shape_out):] + + # Index tensor space + res_tspace = self.tspace[indices] + + # Index partition, manually removing collapsed axes + res_part = self.partition[indices_in] + _, collapsed_axes_in, _ = simulate_slicing(self.shape_in, indices_in) + remaining_axes_in = [i for i in range(len(self.shape_in)) + if i not in collapsed_axes_in] + res_part = res_part.byaxis[remaining_axes_in] + + # Determine new fspace + sliced_shape_out, _, _ = simulate_slicing(self.shape_out, indices_out) + res_fspace = FunctionSpace( + res_part.set, + out_dtype=(self.fspace.scalar_out_dtype, sliced_shape_out)) + + # Further attributes for the new space + res_interp = [self.interp_byaxis[i] for i in range(len(self.shape_in)) + if i in remaining_axes_in] + res_labels = [self.axis_labels[i] for i in range(len(self.shape_in)) + if i in remaining_axes_in] + + # Create new space + return DiscreteLp(res_fspace, res_part, res_tspace, interp=res_interp, + axis_labels=res_labels) + @property def byaxis_in(self): """Object to index along input (domain) dimensions. 
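+
+        For example (a sketch, assuming the per-axis default weighting
+        introduced in this patch; the sliced space keeps the cell sides
+        of the selected axes):
+
+        >>> space = odl.uniform_discr([0, 0], [1, 2], shape=(4, 8))
+        >>> space.byaxis_in[1]
+        uniform_discr(0.0, 2.0, 8)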
@@ -466,25 +646,7 @@ def __getitem__(self, indices):
                 """
                 fspace = space.fspace.byaxis_in[indices]
                 part = space.partition.byaxis[indices]
-
-                if isinstance(space.weighting, ConstWeighting):
-                    # Need to manually construct `tspace` since it doesn't
-                    # know where its weighting factor comes from
-                    try:
-                        iter(indices)
-                    except TypeError:
-                        newshape = space.shape[indices]
-                    else:
-                        newshape = tuple(space.shape[int(i)] for i in indices)
-
-                    weighting = part.cell_volume
-                    tspace = type(space.tspace)(
-                        newshape, space.dtype,
-                        exponent=space.exponent, weighting=weighting)
-                else:
-                    # Other weighting schemes are handled correctly by
-                    # the tensor space
-                    tspace = space.tspace.byaxis[indices]
+                tspace = space.tspace.byaxis[indices]
 
                 try:
                     iter(indices)
@@ -510,12 +672,12 @@ def __repr__(self):
         """Return ``repr(self)``."""
         # Clunky check if the factory repr can be used
         if (uniform_partition_fromintv(
-                self.fspace.domain, self.shape,
+                self.fspace.domain, self.shape_in,
                 nodes_on_bdry=False) == self.partition):
             use_uniform = True
             nodes_on_bdry = False
         elif (uniform_partition_fromintv(
-                self.fspace.domain, self.shape,
+                self.fspace.domain, self.shape_in,
                 nodes_on_bdry=True) == self.partition):
             use_uniform = True
             nodes_on_bdry = True
@@ -524,44 +686,69 @@ def __repr__(self):
 
         if use_uniform:
             ctor = 'uniform_discr'
-            if self.ndim == 1:
-                posargs = [self.min_pt[0], self.max_pt[0], self.shape[0]]
+            if self.ndim_in == 1:
+                posargs = [self.min_pt[0], self.max_pt[0], self.shape_in[0]]
                 posmod = [':.4', ':.4', '']
             else:
-                posargs = [self.min_pt, self.max_pt, self.shape]
+                posargs = [self.min_pt, self.max_pt, self.shape_in]
                 posmod = [array_str, array_str, '']
 
             default_dtype_s = dtype_str(
                 self.tspace.default_dtype(RealNumbers()))
 
             dtype_s = dtype_str(self.dtype)
-            optargs = [('interp', self.interp, 'nearest'),
+            optargs = [('dtype', dtype_s, default_dtype_s),
+                       ('interp', self.interp, 'nearest'),
                        ('impl', self.impl, 'numpy'),
-                       ('nodes_on_bdry', nodes_on_bdry, False),
-                       ('dtype', dtype_s, default_dtype_s)]
+                       ('nodes_on_bdry', nodes_on_bdry, False)]
 
             # Add weighting stuff if not equal to default
-            if (self.exponent == float('inf') or
-                    self.ndim == 0 or
-                    not is_floating_dtype(self.dtype)):
-                # In these cases, weighting constant 1 is the default
-                if (not isinstance(self.weighting, ConstWeighting) or
-                        not np.isclose(self.weighting.const, 1.0)):
-                    optargs.append(('weighting', self.weighting.const, None))
+            default_facs_out = [1.0] * self.ndim_out
+            if (self.exponent != 2.0 or
+                    self.ndim_in == 0 or
+                    not is_floating_dtype(self.dtype)):
+                # In these cases, weighting with all factors 1 is the
+                # default; `repr_part` of the weighting suppresses its
+                # own defaults, so it can be used directly
+                weighting_part = self.weighting.repr_part
+
             else:
-                if (not isinstance(self.weighting, ConstWeighting) or
-                        not np.isclose(self.weighting.const,
-                                       self.cell_volume)):
-                    optargs.append(('weighting', self.weighting.const, None))
+                # Here, per-axis weighting with the cell sides as factors is
+                # the default
+                default_facs_in = list(self.cell_sides)
+                default_facs = default_facs_out + default_facs_in
+
+                if isinstance(self.weighting, ConstWeighting):
+                    weighting_part = ('weighting={:.4}'
+                                      ''.format(self.weighting.const))
+                elif (isinstance(self.weighting, PerAxisWeighting) and
+                      all(fac.ndim 
== 0 and np.isclose(fac, default_facs[i]) + for i, fac in enumerate(self.weighting.factors))): + weighting_part = '' + else: + weighting_part = self.weighting.repr_part optmod = [''] * len(optargs) - if self.dtype in (float, complex, int, bool): - optmod[3] = '!s' + if self.dtype.base in (float, complex, int, bool): + optmod[0] = '!s' + + sep = ', ' if len(weighting_part) <= 30 else [', ', ',\n', ',\n'] with npy_printoptions(precision=4): - inner_str = signature_string(posargs, optargs, + inner_str = signature_string(posargs, optargs, sep=sep, mod=[posmod, optmod]) - return '{}({})'.format(ctor, inner_str) + if len(weighting_part) <= 30: + if weighting_part: + inner_str += ', ' + weighting_part + return '{}({})'.format(ctor, inner_str) + else: + inner_str += ',\n' + weighting_part + return '{}(\n{}\n)'.format(ctor, indent(inner_str)) else: ctor = self.__class__.__name__ @@ -573,7 +760,7 @@ def __repr__(self): sep=[',\n', ', ', ',\n'], mod=['!r', '!s']) - return '{}({})'.format(ctor, indent(inner_str)) + return '{}(\n{}\n)'.format(ctor, indent(inner_str)) def __str__(self): """Return ``str(self)``.""" @@ -670,28 +857,49 @@ def conj(self, out=None): self.tensor.conj(out=out.tensor) return out - def __setitem__(self, indices, values): - """Set values of this element. + def __getitem__(self, indices): + """Return ``self[indices]``. Parameters ---------- - indices : int or `slice` - The position(s) that should be set - values : scalar or `array-like` - Value(s) to be assigned. - If ``indices`` is an integer, ``values`` must be a scalar - value. - If ``indices`` is a slice, ``values`` must be - broadcastable to the size of the slice (same size, - shape ``(1,)`` or scalar). - For ``indices == slice(None)``, i.e. in the call - ``vec[:] = values``, a multi-dimensional array of correct - shape is allowed as ``values``. + indices : index expression + The position(s) that should be accessed. + + Returns + ------- + values : `DiscreteLpElement` + The value(s) at the index (indices). + + Examples + -------- """ - if values in self.space: - self.tensor[indices] = values.tensor + if isinstance(indices, type(self)): + indices = indices.tensor.data + + try: + iter(indices) + except TypeError: + if indices is None: + raise ValueError('creating new axes is not supported.') + else: + # Avoid array comparison with `==` that happens with + # `indices.contains()` + if any(idx is None for idx in indices): + raise ValueError('creating new axes is not supported.') + + res_tens = self.tensor[indices] + if np.isscalar(res_tens): + return res_tens + + try: + res_space = self.space[indices] + except (ValueError, TypeError): + # TODO: adapt error type + # Cannot index space like this, e.g., with an index array or + # a boolean array. Just return the tensor. + return res_tens else: - super(DiscreteLpElement, self).__setitem__(indices, values) + return res_space.element(res_tens) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): """Interface to Numpy's ufunc machinery. 
@@ -934,8 +1142,6 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): reduced_axes = [i for i in range(self.ndim) if i not in axis] - weighting = self.space.weighting - # --- Evaluate ufunc --- # if method == '__call__': @@ -1060,24 +1266,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): labels2 = [lbl + ' (2)' for lbl in inp2.space.axis_labels] labels = labels1 + labels2 - if all(isinstance(inp.space.weighting, ConstWeighting) - for inp in inputs): - # For constant weighting, use the product of the - # two weighting constants. The result tensor space - # cannot know about the "correct" way to combine the - # two constants, so we need to do it manually here. - weighting = (inp1.space.weighting.const * - inp2.space.weighting.const) - tspace = type(res_tens.space)( - res_tens.shape, res_tens.dtype, - exponent=res_tens.space.exponent, - weighting=weighting) - else: - # Otherwise `TensorSpace` knows how to handle this - tspace = res_tens.space - res_space = DiscreteLp( - fspace, part, tspace, interp, axis_labels=labels) + fspace, part, res_tens.space, interp, + axis_labels=labels) result = res_space.element(res_tens) elif method == 'reduce': @@ -1287,9 +1478,12 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs): It defines the domain and the functions and the grid for discretization. dtype : optional - Data type for the discretized space, must be understood by the - `numpy.dtype` constructor. The default for ``None`` depends on the - ``impl`` backend, usually it is ``'float64'`` or ``'float32'``. + Data type of the created space. Can be provided in any way the + `numpy.dtype` constructor understands, e.g. as built-in type or + as a string. + + To create a space of vector- or tensor-valued functions, + use a dtype with a shape, e.g., ``np.dtype((float, (2, 3)))``. 
impl : string, optional Implementation of the data storage arrays kwargs : @@ -1324,21 +1518,21 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs): dtype = np.dtype(dtype) fspace = FunctionSpace(partition.set, out_dtype=dtype) - ds_type = tspace_type(fspace, impl, dtype) + ds_type = tspace_type(fspace, impl, fspace.scalar_out_dtype) if dtype is None: dtype = ds_type.default_dtype() weighting = kwargs.pop('weighting', None) exponent = kwargs.pop('exponent', 2.0) - if weighting is None and is_numeric_dtype(dtype): - if exponent == float('inf') or partition.ndim == 0: - weighting = 1.0 - else: - weighting = partition.cell_volume - - tspace = ds_type(partition.shape, dtype, exponent=exponent, - weighting=weighting) + if (weighting is None and + is_numeric_dtype(dtype) and + exponent != float('inf')): + weighting = np.concatenate([np.ones(len(fspace.out_shape)), + partition.cell_sides]) + + tspace = ds_type(fspace.out_shape + partition.shape, dtype.base, + exponent=exponent, weighting=weighting) return DiscreteLp(fspace, partition, tspace, **kwargs) @@ -1368,8 +1562,8 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs): Examples -------- >>> intv = odl.IntervalProd(0, 1) - >>> space = odl.FunctionSpace(intv) - >>> uniform_discr_fromspace(space, 10) + >>> fspace = odl.FunctionSpace(intv) + >>> odl.uniform_discr_fromspace(fspace, 10) uniform_discr(0.0, 1.0, 10) See Also @@ -1399,6 +1593,10 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs): raise ValueError('cannot safely cast from output data {} type of ' 'the function space to given data type {}' ''.format(fspace.out, dtype_in)) + if dtype.shape != fspace.out_shape: + raise ValueError('`dtype.shape` {} not equal to ' + '`fspace.out_shape` {}' + ''.format(dtype.shape, fspace.out_shape)) if fspace.field == RealNumbers() and not is_real_dtype(dtype): raise ValueError('cannot discretize real space {} with ' @@ -1411,9 +1609,7 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs): ''.format(fspace, dtype)) nodes_on_bdry = kwargs.pop('nodes_on_bdry', False) - partition = uniform_partition_fromintv(fspace.domain, shape, - nodes_on_bdry) - + partition = uniform_partition_fromintv(fspace.domain, shape, nodes_on_bdry) return uniform_discr_frompartition(partition, dtype, impl, **kwargs) @@ -1503,11 +1699,14 @@ def uniform_discr(min_pt, max_pt, shape, dtype=None, impl='numpy', **kwargs): weighting : optional Use weighted inner product, norm, and dist. The following types are supported as ``weighting``: - - ``None``: Use the cell volume as weighting constant (default). + - ``None``: Use the cell sides as weighting constants (default). - ``float``: Weighting by a constant. - - array-like: Point-wise weighting by an array. - - `Weighting`: Use weighting class as-is. Compatibility - with this space's elements is not checked during init. + - sequence of length ``ndim``: Separate weighting per axis. + Entries can be constants or 1D arrays. + - array-like: Pointwise weighting by an array of the same + ``shape`` as this space. + - `Weighting`: Use this weighting as-is. Compatibility + with this space is not checked during init. Returns ------- @@ -1551,45 +1750,6 @@ def uniform_discr(min_pt, max_pt, shape, dtype=None, impl='numpy', **kwargs): return uniform_discr_fromintv(intv_prod, shape, dtype, impl, **kwargs) -def discr_sequence_space(shape, dtype=None, impl='numpy', **kwargs): - """Return an object mimicing the sequence space ``l^p(R^d)``. 
-
-    The returned object is a `DiscreteLp` on the domain ``[0, shape - 1]``,
-    using a uniform grid with stride 1.
-
-    Parameters
-    ----------
-    shape : int or sequence of ints
-        Number of element entries per axis.
-    dtype : optional
-        Data type for the discretized space, must be understood by the
-        `numpy.dtype` constructor. The default for ``None`` depends on the
-        ``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
-    impl : string, optional
-        Implementation of the data storage arrays.
-    kwargs :
-        Additional keyword parameters, see `uniform_discr` for details.
-        Note that ``nodes_on_bdry`` cannot be given.
-
-    Returns
-    -------
-    seqspc : `DiscreteLp`
-        Sequence-space-like discrete Lp.
-
-    Examples
-    --------
-    >>> seq_spc = discr_sequence_space((3, 3))
-    >>> seq_spc.one().norm() == 3.0
-    True
-    >>> seq_spc = discr_sequence_space((3, 3), exponent=1)
-    >>> seq_spc.one().norm() == 9.0
-    True
-    """
-    shape = np.atleast_1d(shape)
-    return uniform_discr([0] * len(shape), shape - 1, shape, dtype, impl,
-                         nodes_on_bdry=True, **kwargs)
-
-
 def uniform_discr_fromdiscr(discr, min_pt=None, max_pt=None,
                             shape=None, cell_sides=None, **kwargs):
     """Return a discretization based on an existing one.
diff --git a/odl/space/base_tensors.py b/odl/space/base_tensors.py
index 67092cf1a17..893d9ca2b20 100644
--- a/odl/space/base_tensors.py
+++ b/odl/space/base_tensors.py
@@ -18,6 +18,7 @@
 from odl.util import (
     is_numeric_dtype, is_real_dtype, is_floating_dtype,
     is_real_floating_dtype, is_complex_floating_dtype, safe_int_conv,
+    simulate_slicing,
     array_str, dtype_str, signature_string, indent, writable_array)
 from odl.util.ufuncs import TensorSpaceUfuncs
 from odl.util.utility import TYPE_MAP_R2C, TYPE_MAP_C2R
@@ -91,18 +92,18 @@ def __init__(self, shape, dtype):
         self.__shape = dtype.shape + shape
         self.__dtype = dtype.base
 
-        if is_real_dtype(self.dtype):
+        if is_real_dtype(self.__dtype):
             # real includes non-floating-point like integers
             field = RealNumbers()
-            self.__real_dtype = self.dtype
+            self.__real_dtype = self.__dtype
             self.__real_space = self
-            self.__complex_dtype = TYPE_MAP_R2C.get(self.dtype, None)
+            self.__complex_dtype = TYPE_MAP_R2C.get(self.__dtype, None)
             self.__complex_space = None  # Set in first call of astype
-        elif is_complex_floating_dtype(self.dtype):
+        elif is_complex_floating_dtype(self.__dtype):
             field = ComplexNumbers()
-            self.__real_dtype = TYPE_MAP_C2R[self.dtype]
+            self.__real_dtype = TYPE_MAP_C2R[self.__dtype]
             self.__real_space = None  # Set in first call of astype
-            self.__complex_dtype = self.dtype
+            self.__complex_dtype = self.__dtype
             self.__complex_space = self
         else:
             field = None
diff --git a/odl/space/fspace.py b/odl/space/fspace.py
index b63f8031ed8..720624ff6d3 100644
--- a/odl/space/fspace.py
+++ b/odl/space/fspace.py
@@ -1327,8 +1327,14 @@ def __call__(self, x, out=None, **kwargs):
 
         elif self.space.tensor_valued:
             # The out object can be any array-like of objects with shapes
-            # that should all be broadcastable to scalar_out_shape.
-            results = np.array(out)
+            # that should all be broadcastable to scalar_out_shape
+            try:
+                results = np.array(out)
+            except ValueError:
+                # For some broadcasting situations, the above call fails.
+ # We need to use `object` explicitly then + results = np.array(out, dtype=object) + if results.dtype == object or scalar_in: # Some results don't have correct shape, need to # broadcast diff --git a/odl/space/npy_tensors.py b/odl/space/npy_tensors.py index 9629ee9ce38..c03bc1a18ca 100644 --- a/odl/space/npy_tensors.py +++ b/odl/space/npy_tensors.py @@ -9,8 +9,9 @@ """NumPy implementation of tensor spaces.""" from __future__ import print_function, division, absolute_import -from future.utils import native from builtins import object +from future.utils import native +from future.moves.itertools import zip_longest import ctypes from functools import partial import numpy as np @@ -19,11 +20,12 @@ from odl.set.space import LinearSpaceTypeError from odl.space.base_tensors import TensorSpace, Tensor from odl.space.weighting import ( - Weighting, ArrayWeighting, ConstWeighting, + Weighting, ArrayWeighting, ConstWeighting, PerAxisWeighting, CustomInner, CustomNorm, CustomDist) from odl.util import ( - dtype_str, signature_string, is_real_dtype, is_numeric_dtype, - writable_array, is_floating_dtype) + dtype_str, signature_string, is_real_dtype, is_numeric_dtype, array_str, + indent, fast_1d_tensor_mult, writable_array, is_floating_dtype, + simulate_slicing, normalized_index_expression) __all__ = ('NumpyTensorSpace',) @@ -101,14 +103,14 @@ def __init__(self, shape, dtype=None, **kwargs): Use weighted inner product, norm, and dist. The following types are supported as ``weighting``: - ``None``: no weighting, i.e. weighting with ``1.0`` (default). - - `Weighting`: Use this weighting as-is. Compatibility - with this space's elements is not checked during init. - - ``float``: Weighting by a constant. - - array-like: Pointwise weighting by an array. + - ``None``: No weighting, i.e. weighting with ``1.0`` (default). + - ``float``: Weighting by a constant. + - sequence of length ``ndim``: Separate weighting per axis. + Entries can be constants or 1D arrays. + - array-like: Pointwise weighting by an array of the same + ``shape`` as this space. + - `Weighting`: Use this weighting as-is. Compatibility + with this space is not checked during init. This option cannot be combined with ``dist``, ``norm`` or ``inner``. It also cannot be used in case of @@ -161,9 +163,9 @@ def __init__(self, shape, dtype=None, **kwargs): Notes ----- - - A distance function or metric on a space :math:`\mathcal{X}` + - A distance function or metric on a space :math:`X` is a mapping - :math:`d:\mathcal{X} \\times \mathcal{X} \\to \mathbb{R}` + :math:`d:X \\times X \\to \mathbb{R}` satisfying the following conditions for all space elements :math:`x, y, z`: @@ -172,8 +174,8 @@ def __init__(self, shape, dtype=None, **kwargs): * :math:`d(x, y) = d(y, x)`, * :math:`d(x, y) \\leq d(x, z) + d(z, y)`. - - A norm on a space :math:`\mathcal{X}` is a mapping - :math:`\| \cdot \|:\mathcal{X} \\to \mathbb{R}` + - A norm on a space :math:`X` is a mapping + :math:`\| \cdot \|:X \\to \mathbb{R}` satisfying the following conditions for all space elements :math:`x, y`: and scalars :math:`s`: @@ -183,11 +185,11 @@ def __init__(self, shape, dtype=None, **kwargs): * :math:`\| x+y\| \\leq \| x\| + \| y\|`. 
- - An inner product on a space :math:`\mathcal{X}` over a field + - An inner product on a space :math:`X` over a field :math:`\mathbb{F} = \mathbb{R}` or :math:`\mathbb{C}` is a mapping - :math:`\\langle\cdot, \cdot\\rangle: \mathcal{X} \\times - \mathcal{X} \\to \mathbb{F}` + :math:`\\langle\cdot, \cdot\\rangle: X \\times + X \\to \mathbb{F}` satisfying the following conditions for all space elements :math:`x, y, z`: and scalars :math:`s`: @@ -246,6 +248,7 @@ def __init__(self, shape, dtype=None, **kwargs): # Set the weighting if weighting is not None: + if isinstance(weighting, Weighting): if weighting.impl != 'numpy': raise ValueError("`weighting.impl` must be 'numpy', " @@ -255,25 +258,51 @@ def __init__(self, shape, dtype=None, **kwargs): '`exponent`: {} != {}' ''.format(weighting.exponent, exponent)) self.__weighting = weighting + + elif np.isscalar(weighting): + if self.ndim == 1: + # Prefer per-axis weighting if possible, it behaves better + self.__weighting = NumpyTensorSpacePerAxisWeighting( + [weighting], exponent) + else: + self.__weighting = NumpyTensorSpaceConstWeighting( + weighting, exponent) + + elif len(weighting) == self.ndim: + self.__weighting = NumpyTensorSpacePerAxisWeighting( + weighting, exponent) + else: - self.__weighting = _weighting(weighting, exponent) + array = np.asarray(weighting) + if array.dtype == object: + raise ValueError( + 'invalid `weighting` provided; valid inputs are ' + '`None`, constants, sequences of length `ndim` ' + "and array-like objects of this space's `shape`") + self.__weighting = NumpyTensorSpaceArrayWeighting( + array, exponent) # Check (afterwards) that the weighting input was sane - if isinstance(self.weighting, NumpyTensorSpaceArrayWeighting): - if self.weighting.array.dtype == object: - raise ValueError('invalid `weighting` argument: {}' - ''.format(weighting)) - elif not np.can_cast(self.weighting.array.dtype, self.dtype): + if isinstance(self.__weighting, NumpyTensorSpaceArrayWeighting): + if not np.can_cast(self.__weighting.array.dtype, self.dtype): raise ValueError( 'cannot cast from `weighting` data type {} to ' 'the space `dtype` {}' - ''.format(dtype_str(self.weighting.array.dtype), + ''.format(dtype_str(self.__weighting.array.dtype), dtype_str(self.dtype))) - if self.weighting.array.shape != self.shape: + if self.__weighting.array.shape != self.shape: raise ValueError('array-like weights must have same ' 'shape {} as this space, got {}' ''.format(self.shape, - self.weighting.array.shape)) + self.__weighting.array.shape)) + + elif isinstance(self.__weighting, + NumpyTensorSpacePerAxisWeighting): + if len(self.__weighting.factors) != self.ndim: + raise ValueError( + 'per-axis weighting must have {} (=ndim) factors, ' + 'got {}'.format(self.ndim, + len(self.__weighting.factors))) elif dist is not None: self.__weighting = NumpyTensorSpaceCustomDist(dist) @@ -545,7 +574,7 @@ def _lincomb(self, a, x1, b, x2, out): >>> result is out True """ - _lincomb_impl(a, x1, b, x2, out) + _lincomb_impl(a, x1.data, b, x2.data, out.data) def _dist(self, x1, x2): """Return the distance between ``x1`` and ``x2``. @@ -762,6 +791,79 @@ def __hash__(self): return hash((super(NumpyTensorSpace, self).__hash__(), self.weighting)) + def __getitem__(self, indices): + """Return ``self[indices]``. + + For all supported cases, indexing is implemented such that for an + element ``x in space``, the statement :: + + x[indices] in space[indices] + + is ``True``. 
+ + Space indexing works with + + - integers, + - `slice` objects, + - index arrays, i.e., 1D array-like objects containing integers, + + and combinations of the above. It does not work with boolean arrays + or more advanced "fancy indexing". + + .. note:: + This method is a default implementation that propagates only + ``shape`` and ``dtype``. Subclasses with more properties + need to override the method. + + Examples + -------- + A single integer slices along the first axis (index does not + matter as long as it lies within the bounds): + + >>> rn = odl.rn((3, 4, 5, 6)) + >>> rn[0] + rn((4, 5, 6)) + >>> rn[2] + rn((4, 5, 6)) + + Multiple indices slice into corresponding axes from the left: + + >>> rn[0, 0] + rn((5, 6)) + >>> rn[0, 1, 1] + rn(6) + + Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep + one or several axes intact: + + >>> rn[0, :, 1, :] + rn((4, 6)) + >>> rn[..., 0, 0] + rn((3, 4)) + + With slices, parts of axes can be selected: + + >>> rn[0, :3, 1:4, ::2] + rn((3, 3, 3)) + + Array-like objects (must all have the same 1D shape) of integers are + treated as follows: if their common length is ``n``, a new axis + of length ``n`` is created at the position of the leftmost index + array, and all index array axes are collapsed (Note: this is + not so useful for spaces, more so for elements): + + >>> rn[0, 0, [0, 1, 0, 2], :] + rn((4, 6)) + >>> rn[:, [1, 1], [0, 2], :] + rn((3, 2, 6)) + >>> rn[[2, 0], [3, 3], [0, 1], [5, 2]] + rn(2) + """ + new_shape, removed_axes, _ = simulate_slicing(self.shape, indices) + weighting = slice_weighting(self.weighting, self.shape, indices) + return type(self)(shape=new_shape, dtype=self.dtype, + weighting=weighting, exponent=self.exponent) + @property def byaxis(self): """Return the subspace defined along one or several dimensions. @@ -783,7 +885,7 @@ def byaxis(self): """ space = self - class NpyTensorSpacebyaxis(object): + class NpyTensorSpaceByAxis(object): """Helper class for indexing by axis.""" @@ -792,24 +894,25 @@ def __getitem__(self, indices): try: iter(indices) except TypeError: - newshape = space.shape[indices] - else: - newshape = tuple(space.shape[i] for i in indices) - - if isinstance(space.weighting, ArrayWeighting): - new_array = np.asarray(space.weighting.array[indices]) - weighting = NumpyTensorSpaceArrayWeighting( - new_array, space.weighting.exponent) + # Integer, slice or Ellipsis + indices = list(range(space.ndim))[indices] + if not isinstance(indices, list): + indices = [indices] else: - weighting = space.weighting + indices = [int(i) for i in indices] - return type(space)(newshape, space.dtype, weighting=weighting) + new_shape = tuple(space.shape[i] for i in indices) + new_weighting = slice_weighting_by_axis(space.weighting, + space.shape, indices) + return type(space)(new_shape, space.dtype, + weighting=new_weighting, + exponent=space.exponent) def __repr__(self): """Return ``repr(self)``.""" return repr(space) + '.byaxis' - return NpyTensorSpacebyaxis() + return NpyTensorSpaceByAxis() def __repr__(self): """Return ``repr(self)``.""" @@ -1093,26 +1196,37 @@ def __getitem__(self, indices): [[-9., 2., -9.], [-9., 5., -9.]] ) + + More advanced indexing: + + >>> x = space.element([[1, 2, 3], + ... 
[4, 5, 6]]) + >>> x[[0, 0, 1], [2, 1, 0]] # entries (0, 2), (0, 1) and (1, 0) + rn(3).element([ 3., 2., 4.]) + >>> bool_elem = x.ufuncs.less(3) + >>> x[bool_elem] + rn(2).element([ 1., 2.]) """ - # Lazy implementation: index the array and deal with it if isinstance(indices, NumpyTensor): indices = indices.data - arr = self.data[indices] + res_arr = self.data[indices] - if np.isscalar(arr): + if np.isscalar(res_arr): if self.space.field is not None: - return self.space.field.element(arr) + return self.space.field.element(res_arr) else: - return arr + return res_arr else: - if is_numeric_dtype(self.dtype): - weighting = self.space.weighting - else: - weighting = None - space = type(self.space)( - arr.shape, dtype=self.dtype, exponent=self.space.exponent, - weighting=weighting) - return space.element(arr) + # If possible, ensure that + # `self.space[indices].shape == self[indices].shape` + try: + res_space = self.space[indices] + except TypeError: + # No weighting + res_space = type(self.space)( + res_arr.shape, dtype=self.dtype, + exponent=self.space.exponent) + return res_space.element(res_arr) def __setitem__(self, indices, values): """Implement ``self[indices] = values``. @@ -1630,14 +1744,6 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out1 = out_tuple[0] out2 = out_tuple[1] - # --- Process `inputs` --- # - - # Convert inputs that are ODL tensors to Numpy arrays so that the - # native Numpy ufunc is called later - inputs = tuple( - inp.asarray() if isinstance(inp, type(self)) else inp - for inp in inputs) - # --- Get some parameters for later --- # # Arguments for `writable_array` and/or space constructors @@ -1649,6 +1755,18 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): exponent = self.space.exponent weighting = self.space.weighting + try: + weighting2 = inputs[1].space.weighting + except (IndexError, AttributeError): + weighting2 = None + + # --- Process `inputs` --- # + + # Convert inputs that are ODL tensors to Numpy arrays so that the + # native Numpy ufunc is called later + inputs = tuple( + inp.asarray() if isinstance(inp, type(self)) else inp + for inp in inputs) # --- Evaluate ufunc --- # @@ -1749,12 +1867,76 @@ class CtxNone(object): # Wrap result if necessary (lazily) if out is None: if is_floating_dtype(res.dtype): - if res.shape != self.shape: - # Don't propagate weighting if shape changes - weighting = NumpyTensorSpaceConstWeighting(1.0, - exponent) + + # For weighting, we need to check which axes remain in + # the result and slice the weighting accordingly + if method == 'reduce': + axis = kwargs.get('axis', 0) + try: + iter(axis) + except TypeError: + axis = [axis] + + if kwargs.get('keepdims', False): + reduced_axes = [] + else: + reduced_axes = [i for i in range(self.ndim) + if i not in axis] + weighting = slice_weighting_by_axis( + weighting, self.shape, reduced_axes) + + elif method == 'outer': + # Stack the weightings as well if possible. Stacking + # makes sense for per-axis weighting and constant + # weighting for 1D inputs. An input that has no + # weighting results in per-axis weighting with + # constants 1. 
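+                    # Sketch of the intended stacking: for
+                    # x in rn(3, weighting=2.0) and y in rn(2, weighting=3.0),
+                    # np.multiply.outer(x, y) should land in a space with
+                    # per-axis weighting (2.0, 3.0).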
+                    can_stack = True
+                    if isinstance(weighting,
+                                  NumpyTensorSpacePerAxisWeighting):
+                        factors = weighting.factors
+                    elif (isinstance(weighting,
+                                     NumpyTensorSpaceConstWeighting) and
+                          inputs[0].ndim == 1):
+                        factors = (weighting.const,)
+                    else:
+                        can_stack = False
+
+                    if weighting2 is None:
+                        factors2 = (1.0,) * (res.ndim - inputs[0].ndim)
+                    elif isinstance(weighting2,
+                                    NumpyTensorSpacePerAxisWeighting):
+                        factors2 = weighting2.factors
+                    elif (isinstance(weighting2,
+                                     NumpyTensorSpaceConstWeighting) and
+                          inputs[1].ndim == 1):
+                        factors2 = (weighting2.const,)
+                    elif (isinstance(weighting2,
+                                     NumpyTensorSpaceConstWeighting) and
+                          weighting2.const == 1):
+                        factors2 = (1.0,) * inputs[1].ndim
+                    else:
+                        can_stack = False
+
+                    if can_stack:
+                        weighting = NumpyTensorSpacePerAxisWeighting(
+                            factors + factors2,
+                            exponent=weighting.exponent)
+                    else:
+                        weighting = NumpyTensorSpaceConstWeighting(
+                            1.0, exponent)
+
+                elif (res.shape != self.shape and
+                      not isinstance(weighting,
+                                     NumpyTensorSpaceConstWeighting)):
+                    # For other cases, preserve constant weighting, and
+                    # preserve other weightings if the shape is unchanged
+                    weighting = NumpyTensorSpaceConstWeighting(
+                        1.0, exponent)
+
                 spc_kwargs = {'weighting': weighting}
-            else:
+
+            else:  # not is_floating_dtype(res.dtype)
                 spc_kwargs = {}
 
             out_space = type(self.space)(res.shape, res.dtype,
@@ -1764,6 +1946,9 @@ class CtxNone(object):
     return out
 
 
+# --- Implementations of low-level functions --- #
+
+
 def _blas_is_applicable(*args):
     """Whether BLAS routines can be applied or not.
 
@@ -1774,8 +1959,8 @@ def _blas_is_applicable(*args):
 
     Parameters
     ----------
-    x1,...,xN : `NumpyTensor`
-        The tensors to be tested for BLAS conformity.
+    x1,...,xN : `numpy.ndarray`
+        The arrays to be tested for BLAS conformity.
 
     Returns
    -------
@@ -1806,11 +1991,11 @@ def _lincomb_impl(a, x1, b, x2, out):
 
     if size < THRESHOLD_SMALL:
         # Faster for small arrays
-        out.data[:] = a * x1.data + b * x2.data
+        out[:] = a * x1 + b * x2
         return
 
     elif (size < THRESHOLD_MEDIUM or
-          not _blas_is_applicable(x1.data, x2.data, out.data)):
+          not _blas_is_applicable(x1, x2, out)):
 
         def fallback_axpy(x1, x2, n, a):
             """Fallback axpy implementation avoiding copy."""
@@ -1831,185 +2016,94 @@ def fallback_copy(x1, x2, n):
             return x2
 
         axpy, scal, copy = (fallback_axpy, fallback_scal, fallback_copy)
-        x1_arr = x1.data
-        x2_arr = x2.data
-        out_arr = out.data
+        # Keep the `*_arr` aliases: the `is` checks below must compare
+        # the original input arrays, not raveled views
+        x1_arr, x2_arr, out_arr = x1, x2, out
 
     else:
         # Need flat data for BLAS, otherwise in-place does not work.
         # Raveling must happen in fixed order for non-contiguous out,
         # otherwise 'A' is applied to arrays, which makes the outcome
         # dependent on their respective contiguousness.
-        if out.data.flags.f_contiguous:
+        if out.flags.f_contiguous:
             ravel_order = 'F'
         else:
             ravel_order = 'C'
 
-        x1_arr = x1.data.ravel(order=ravel_order)
-        x2_arr = x2.data.ravel(order=ravel_order)
-        out_arr = out.data.ravel(order=ravel_order)
+        x1_arr = x1.ravel(order=ravel_order)
+        x2_arr = x2.ravel(order=ravel_order)
+        out_arr = out.ravel(order=ravel_order)
         axpy, scal, copy = scipy.linalg.blas.get_blas_funcs(
             ['axpy', 'scal', 'copy'], arrays=(x1_arr, x2_arr, out_arr))
 
     if x1 is x2 and b != 0:
         # x1 is aligned with x2 -> out = (a+b)*x1
         _lincomb_impl(a + b, x1, 0, x1, out)
     elif out is x1 and out is x2:
         # All the vectors are aligned -> out = (a+b)*out
         scal(a + b, out_arr, size)
     elif out is x1:
         # out is aligned with x1 -> out = a*out + b*x2
         if a != 1:
             scal(a, out_arr, size)
         if b != 0:
             axpy(x2_arr, out_arr, size, b)
     elif out is x2:
         # out is aligned with x2 -> out = a*x1 + b*out
         if b != 1:
             scal(b, out_arr, size)
         if a != 0:
             axpy(x1_arr, out_arr, size, a)
     else:
         # We have exhausted all alignment options, so x1 is not x2 is not out
         # We now optimize for various values of a and b
         if b == 0:
             if a == 0:  # Zero assignment -> out = 0
                 out_arr[:] = 0
             else:  # Scaled copy -> out = a*x1
                 copy(x1_arr, out_arr, size)
                 if a != 1:
                     scal(a, out_arr, size)
 
         else:  # b != 0
             if a == 0:  # Scaled copy -> out = b*x2
                 copy(x2_arr, out_arr, size)
                 if b != 1:
                     scal(b, out_arr, size)
 
             elif a == 1:  # No scaling in x1 -> out = x1 + b*x2
                 copy(x1_arr, out_arr, size)
                 axpy(x2_arr, out_arr, size, b)
             else:  # Generic case -> out = a*x1 + b*x2
                 copy(x2_arr, out_arr, size)
                 if b != 1:
                     scal(b, out_arr, size)
                 axpy(x1_arr, out_arr, size, a)
 
 
-def _weighting(weights, exponent):
-    """Return a weighting whose type is inferred from the arguments."""
-    if np.isscalar(weights):
-        weighting = NumpyTensorSpaceConstWeighting(weights, exponent)
-    elif weights is None:
-        weighting = NumpyTensorSpaceConstWeighting(1.0, exponent)
-    else:  # last possibility: make an array
-        arr = np.asarray(weights)
-        weighting = NumpyTensorSpaceArrayWeighting(arr, exponent)
-    return weighting
-
-
-def npy_weighted_inner(weights):
-    """Weighted inner product on `TensorSpace`'s as free function.
-
-    Parameters
-    ----------
-    weights : scalar or `array-like`
-        Weights of the inner product. A scalar is interpreted as a
-        constant weight, a 1-dim. array as a weighting vector.
-
-    Returns
-    -------
-    inner : `callable`
-        Inner product function with given weight. Constant weightings
-        are applicable to spaces of any size, for arrays the sizes
-        of the weighting and the space must match.
-
-    See Also
-    --------
-    NumpyTensorSpaceConstWeighting
-    NumpyTensorSpaceArrayWeighting
-    """
-    return _weighting(weights, exponent=2.0).inner
-
-
-def npy_weighted_norm(weights, exponent=2.0):
-    """Weighted norm on `TensorSpace`'s as free function.
-
-    Parameters
-    ----------
-    weights : scalar or `array-like`
-        Weights of the norm. A scalar is interpreted as a
-        constant weight, a 1-dim. array as a weighting vector.
-    exponent : positive `float`
-        Exponent of the norm.
-
-    Returns
-    -------
-    norm : `callable`
-        Norm function with given weight. 
Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).norm - - -def npy_weighted_dist(weights, exponent=2.0): - """Weighted distance on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the distance. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - exponent : positive `float` - Exponent of the norm. - - Returns - ------- - dist : `callable` - Distance function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).dist - - -def _norm_default(x): +def _norm_impl(x): """Default Euclidean norm implementation.""" # Lazy import to improve `import odl` time import scipy.linalg - if _blas_is_applicable(x.data): + if _blas_is_applicable(x): nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype) norm = partial(nrm2, n=native(x.size)) else: norm = np.linalg.norm - return norm(x.data.ravel()) + return norm(x.ravel()) -def _pnorm_default(x, p): +def _pnorm_impl(x, p): """Default p-norm implementation.""" - return np.linalg.norm(x.data.ravel(), ord=p) + return np.linalg.norm(x.ravel(), ord=p) -def _pnorm_diagweight(x, p, w): +def _pnorm_diagweight_impl(x, p, w): """Diagonally weighted p-norm implementation.""" # Ravel both in the same order (w is a numpy array) - order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C' + order = 'F' if all(a.flags.f_contiguous for a in (x, w)) else 'C' # This is faster than first applying the weights and then summing with # BLAS dot or nrm2 - xp = np.abs(x.data.ravel(order)) + xp = np.abs(x.ravel(order)) if p == float('inf'): xp *= w.ravel(order) return np.max(xp) @@ -2019,10 +2113,10 @@ def _pnorm_diagweight(x, p, w): return np.sum(xp) ** (1 / p) -def _inner_default(x1, x2): +def _inner_impl(x1, x2): """Default Euclidean inner product implementation.""" # Ravel both in the same order - order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C' + order = 'F' if all(a.flags.f_contiguous for a in (x1, x2)) else 'C' if is_real_dtype(x1.dtype): if x1.size > THRESHOLD_MEDIUM: @@ -2030,16 +2124,101 @@ def _inner_default(x1, x2): return np.tensordot(x1, x2, [range(x1.ndim)] * 2) else: # Several times faster for small arrays - return np.dot(x1.data.ravel(order), - x2.data.ravel(order)) + return np.dot(x1.ravel(order), x2.ravel(order)) else: # x2 as first argument because we want linearity in x1 - return np.vdot(x2.data.ravel(order), - x1.data.ravel(order)) + return np.vdot(x2.ravel(order), x1.ravel(order)) + +# --- Weightings --- # -# TODO: implement intermediate weighting schemes with arrays that are -# broadcast, i.e. between scalar and full-blown in dimensionality? + +def slice_weighting(weighting, space_shape, indices): + """Return a weighting for a space after indexing. 
+
+    The different types of weightings behave as follows:
+
+    - ``ConstWeighting``: preserved
+    - ``PerAxisWeighting``: preserved, where factors are discarded for
+      removed axes and sliced for other axes in which an array is used
+    - ``ArrayWeighting``: preserved, using the sliced array for the
+      new weighting
+    - Other: not preserved, mapped to ``None``.
+    """
+    indices = normalized_index_expression(indices, space_shape)
+    # Use `None`-free `indices` to simulate slicing
+    indices_no_none = [i for i in indices if i is not None]
+    new_shape, removed_axes, _ = simulate_slicing(space_shape, indices_no_none)
+
+    if isinstance(weighting, NumpyTensorSpaceConstWeighting):
+        new_weighting = weighting
+
+    elif isinstance(weighting, NumpyTensorSpacePerAxisWeighting):
+        # Determine factors without `None` components
+        factors = []
+        for i, (fac, idx) in enumerate(zip_longest(weighting.factors,
+                                                   indices_no_none,
+                                                   fillvalue=slice(None))):
+            if i in removed_axes:
+                continue
+
+            if fac.ndim == 0:
+                factors.append(fac)
+            else:
+                factors.append(fac[idx])
+
+        # Add 1.0 where `None` is in `indices`
+        for i, idx in enumerate(indices):
+            if idx is None:
+                factors.insert(i, 1.0)
+
+        new_weighting = NumpyTensorSpacePerAxisWeighting(
+            factors, weighting.exponent)
+
+    elif isinstance(weighting, NumpyTensorSpaceArrayWeighting):
+        array = weighting.array[indices]
+        new_weighting = NumpyTensorSpaceArrayWeighting(
+            array, weighting.exponent)
+    else:
+        new_weighting = None
+
+    return new_weighting
+
+
+def slice_weighting_by_axis(weighting, space_shape, indices):
+    """Return a weighting for a space after indexing by axis.
+
+    The different types of weightings behave as follows:
+
+    - ``ConstWeighting``: preserved
+    - ``PerAxisWeighting``: preserved, where factors are discarded for
+      removed axes, repeated for repeated axes, and set to 1 for new
+      axes
+    - ``ArrayWeighting``: not preserved, no meaningful way to slice by axis
+    - Other: not preserved, mapped to ``None``.
+    """
+    try:
+        iter(indices)
+    except TypeError:
+        # Integer, slice or Ellipsis
+        indices = list(range(len(space_shape)))[indices]
+        if not isinstance(indices, list):
+            indices = [indices]
+    else:
+        indices = [int(i) for i in indices]
+
+    if isinstance(weighting, NumpyTensorSpaceConstWeighting):
+        new_weighting = weighting
+
+    elif isinstance(weighting, NumpyTensorSpacePerAxisWeighting):
+        factors = [weighting.factors[i] for i in indices]
+        new_weighting = NumpyTensorSpacePerAxisWeighting(
+            factors, weighting.exponent)
+
+    else:
+        new_weighting = None
+
+    return new_weighting
 
 
 class NumpyTensorSpaceArrayWeighting(ArrayWeighting):
@@ -2110,9 +2289,12 @@ def __init__(self, array, exponent=2.0):
             is not checked during initialization.
         """
         if isinstance(array, NumpyTensor):
             array = array.data
-        elif not isinstance(array, np.ndarray):
-            array = np.asarray(array)
+
+        array = np.asarray(array)
+        if array.dtype == object:
+            raise ValueError('got invalid `array` as input')
+
         super(NumpyTensorSpaceArrayWeighting, self).__init__(
             array, impl='numpy', exponent=exponent)
 
@@ -2138,7 +2320,7 @@ def inner(self, x1, x2):
                                       'exponent != 2 (got {})'
                                       ''.format(self.exponent))
         else:
-            inner = _inner_default(x1 * self.array, x2)
+            inner = _inner_impl(x1.data * self.array, x2.data)
             if is_real_dtype(x1.dtype):
                 return float(inner)
             else:
@@ -2158,12 +2340,286 @@ def norm(self, x):
             The norm of the provided tensor.
         """
         if self.exponent == 2.0:
-            norm_squared = self.inner(x, x).real  # TODO: optimize?!
+            norm_squared = self.inner(x, x).real  # TODO: optimize?
if norm_squared < 0: norm_squared = 0.0 # Compensate for numerical error return float(np.sqrt(norm_squared)) else: - return float(_pnorm_diagweight(x, self.exponent, self.array)) + return float(_pnorm_diagweight_impl(x.data, self.exponent, + self.array)) + + +class NumpyTensorSpacePerAxisWeighting(PerAxisWeighting): + + """Weighting of a space with one weight per axis. + + See Notes for mathematical details. + """ + + def __init__(self, factors, exponent=2.0): + """Initialize a new instance. + + Parameters + ---------- + factors : sequence of `array-like` + Weighting factors, one per axis. The factors can be constants + or one-dimensional array-like objects. + exponent : positive float, optional + Exponent of the norm. For values other than 2.0, the inner + product is not defined. + + Notes + ----- + We consider a tensor space :math:`\mathbb{F}^n` of shape + :math:`n \in \mathbb{N}^d` over a field :math:`\mathbb{F}`. + If :math:`0 < v^{(i)} \in \mathbb{R}^{n_i}` are vectors of length + equal to the corresponding axis, their outer product + + .. math:: + v_{k} = \prod_{i=0}^d v^{(i)}_{k_i} + + defines a tensor of shape :math:`n`. With this tensor + we can define a weighted space as follows. + + - For exponent 2.0, a new weighted inner product is given as + + .. math:: + \\langle a, b\\rangle_v := + \\langle v \odot a, b\\rangle = + b^{\mathrm{H}} (v \odot a), + + where :math:`b^{\mathrm{H}}` stands for transposed complex + conjugate and ":math:`\odot`" for pointwise product. + + - For other exponents, only norm and dist are defined. In the + case of exponent :math:`\\infty`, the weighted norm is defined + as + + .. math:: + \| a \|_{v, \\infty} := \| a \|_{\\infty}, + + otherwise it is + + .. math:: + \| a \|_{v, p} := \| v^{1/p} \odot a \|_{p}. + + Note that this definition is chosen such that the limit + property in :math:`p` holds, i.e. + + .. math:: + \| a\|_{v, p} \\to + \| a \|_{v, \\infty} \quad (p \\to \\infty). + """ + # TODO: allow 3-tuples for `bdry, inner, bdry` type factors + conv_factors = [] + for factor in factors: + factor = np.asarray(factor) + if factor.ndim not in (0, 1): + raise ValueError( + '`factors` must all be scalar or 1-dim. vectors, got ' + '{}-dim. entries'.format(factor.ndim)) + + conv_factors.append(factor) + + super(NumpyTensorSpacePerAxisWeighting, self).__init__( + conv_factors, impl='numpy', exponent=exponent) + + @property + def const_axes(self): + """Tuple of indices in which the factors are constants.""" + return tuple(i for i, fac in enumerate(self.factors) if fac.ndim == 0) + + @property + def consts(self): + """Tuple containing those factors that are constants.""" + return tuple(fac for fac in self.factors if fac.ndim == 0) + + @property + def const(self): + """For one constant factor, this is the unwrapped factor.""" + if len(self.factors) == 1 and len(self.consts) == 1: + return self.consts[0] + else: + raise TypeError('`const` only defined for a single scalar factor') + + @property + def array_axes(self): + """Tuple of indices in which the factors are arrays.""" + return tuple(i for i, fac in enumerate(self.factors) if fac.ndim == 1) + + @property + def arrays(self): + """Tuple containing those factors that are arrays.""" + return tuple(fac for fac in self.factors if fac.ndim == 1) + + def inner(self, x1, x2): + """Return the weighted inner product of ``x1`` and ``x2``. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Tensors whose inner product is calculated. 
+
+        Returns
+        -------
+        inner : float or complex
+            The inner product of the two provided tensors.
+        """
+        if self.exponent != 2.0:
+            raise NotImplementedError('no inner product defined for '
+                                      'exponent != 2 (got {})'
+                                      ''.format(self.exponent))
+
+        const = np.prod(self.consts)
+        x1_w = fast_1d_tensor_mult(x1.data, self.arrays, axes=self.array_axes)
+        inner = const * _inner_impl(x1_w, x2.data)
+        if x1.space.field is None:
+            return inner
+        else:
+            return x1.space.field.element(inner)
+
+    def norm(self, x):
+        """Return the weighted norm of ``x``.
+
+        Parameters
+        ----------
+        x : `NumpyTensor`
+            Tensor whose norm is calculated.
+
+        Returns
+        -------
+        norm : float
+            The norm of the tensor.
+        """
+        const = np.prod(self.consts)
+
+        if self.exponent == 2.0:
+            arrays = [np.sqrt(arr) for arr in self.arrays]
+            x_w = fast_1d_tensor_mult(x.data, arrays, axes=self.array_axes)
+            return float(np.sqrt(const) * _norm_impl(x_w))
+        elif self.exponent == float('inf'):
+            return float(_pnorm_impl(x.data, float('inf')))
+        else:
+            arrays = [np.power(arr, 1 / self.exponent) for arr in self.arrays]
+            x_w = fast_1d_tensor_mult(x.data, arrays, axes=self.array_axes)
+            return float(const ** (1 / self.exponent) *
+                         _pnorm_impl(x_w, self.exponent))
+
+    def dist(self, x1, x2):
+        """Return the weighted distance between ``x1`` and ``x2``.
+
+        Parameters
+        ----------
+        x1, x2 : `NumpyTensor`
+            Tensors whose mutual distance is calculated.
+
+        Returns
+        -------
+        dist : float
+            The distance between the tensors.
+        """
+        const = np.prod(self.consts)
+        diff = (x1 - x2).data
+
+        if self.exponent == 2.0:
+            arrays = [np.sqrt(arr) for arr in self.arrays]
+            fast_1d_tensor_mult(diff, arrays, axes=self.array_axes, out=diff)
+            return float(np.sqrt(const) * _norm_impl(diff))
+        elif self.exponent == float('inf'):
+            return float(_pnorm_impl(diff, float('inf')))
+        else:
+            arrays = [np.power(arr, 1 / self.exponent) for arr in self.arrays]
+            fast_1d_tensor_mult(diff, arrays, axes=self.array_axes, out=diff)
+            return float(const ** (1 / self.exponent) *
+                         _pnorm_impl(diff, self.exponent))
+
+    def __eq__(self, other):
+        """Return ``self == other``.
+
+        Returns
+        -------
+        equal : bool
+            ``True`` if ``other`` is a `NumpyTensorSpacePerAxisWeighting`
+            instance with the same factors (equal for constants,
+            *identical* for arrays), ``False`` otherwise.
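For orientation, the per-axis 2-norm computed above is equivalent to a dense
weighted norm with the outer-product weight. A minimal standalone NumPy
sketch (values assumed for illustration) checking this equivalence::

    import numpy as np

    x = np.arange(12, dtype=float).reshape(3, 4)
    const = 0.5                            # scalar factor for axis 0
    arr = np.array([1.0, 2.0, 3.0, 4.0])   # 1D array factor for axis 1

    # Dense reference: ||x||_v = sqrt(sum(v * x**2)) with v = outer product
    v = const * arr[None, :]
    ref = np.sqrt(np.sum(v * x ** 2))

    # Per-axis variant as in `norm`: multiply by sqrt of the array factors
    # and pull the constant out as sqrt(const)
    x_w = x * np.sqrt(arr)[None, :]
    assert np.isclose(ref, np.sqrt(const) * np.linalg.norm(x_w.ravel()))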
+        """
+        if other is self:
+            return True
+
+        if isinstance(other, ConstWeighting):
+            # Consider per-axis weighting in 1 axis with a constant to
+            # be equal to constant weighting
+            return (self.exponent == other.exponent and
+                    len(self.factors) == 1 and
+                    self.factors[0].ndim == 0 and
+                    self.factors[0] == other.const)
+        elif not isinstance(other, NumpyTensorSpacePerAxisWeighting):
+            return False
+
+        same_const_idcs = (other.const_axes == self.const_axes)
+        consts_equal = (self.consts == other.consts)
+        arrs_ident = all(a is b for a, b in zip(self.arrays, other.arrays))
+
+        return (super(NumpyTensorSpacePerAxisWeighting, self).__eq__(other) and
+                len(self.factors) == len(other.factors) and
+                same_const_idcs and
+                consts_equal and
+                arrs_ident)
+
+    def __hash__(self):
+        """Return ``hash(self)``."""
+        return hash(
+            (super(NumpyTensorSpacePerAxisWeighting, self).__hash__(),) +
+            tuple(fac.tobytes() for fac in self.factors))
+
+    @property
+    def repr_part(self):
+        """String usable in a space's ``__repr__`` method."""
+        max_elems = 2 * np.get_printoptions()['edgeitems']
+
+        def factors_repr(factors):
+            factor_strs = []
+            for fac in factors:
+                if fac.ndim == 0:
+                    factor_strs.append('{:.4}'.format(float(fac)))
+                else:
+                    factor_strs.append(array_str(fac, nprint=max_elems))
+            if len(factor_strs) == 1:
+                return factor_strs[0]
+            else:
+                return '({})'.format(', '.join(factor_strs))
+
+        optargs = []
+        optmod = []
+        if not all(fac.ndim == 0 and fac == 1.0 for fac in self.factors):
+            optargs.append(('weighting', factors_repr(self.factors), ''))
+            optmod.append('!s')
+
+        optargs.append(('exponent', self.exponent, 2.0))
+        optmod.append('')
+
+        return signature_string([], optargs, mod=[[], optmod])
+
+    def __repr__(self):
+        """Return ``repr(self)``."""
+        max_elems = 2 * np.get_printoptions()['edgeitems']
+
+        def factors_repr(factors):
+            factor_strs = []
+            for fac in factors:
+                if fac.ndim == 0:
+                    factor_strs.append('{:.4}'.format(float(fac)))
+                else:
+                    factor_strs.append(array_str(fac, nprint=max_elems))
+            return '({})'.format(', '.join(factor_strs))
+
+        posargs = [factors_repr(self.factors)]
+        optargs = [('exponent', self.exponent, 2.0)]
+
+        inner_str = signature_string(posargs, optargs, mod=['!s', ''])
+        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))
+
+    def __str__(self):
+        """Return ``str(self)``."""
+        return repr(self)


 class NumpyTensorSpaceConstWeighting(ConstWeighting):
@@ -2243,12 +2699,12 @@ def inner(self, x1, x2):
             raise NotImplementedError('no inner product defined for '
                                       'exponent != 2 (got {})'
                                       ''.format(self.exponent))
+
+        inner = self.const * _inner_impl(x1.data, x2.data)
+        if x1.space.field is None:
+            return inner
         else:
-            inner = self.const * _inner_default(x1, x2)
-            if x1.space.field is None:
-                return inner
-            else:
-                return x1.space.field.element(inner)
+            return x1.space.field.element(inner)

     def norm(self, x):
         """Return the weighted norm of ``x``.
@@ -2264,12 +2720,12 @@ def norm(self, x):
             The norm of the tensor.
         """
         if self.exponent == 2.0:
-            return float(np.sqrt(self.const) * _norm_default(x))
+            return float(np.sqrt(self.const) * _norm_impl(x.data))
         elif self.exponent == float('inf'):
-            return float(self.const * _pnorm_default(x, self.exponent))
+            return float(_pnorm_impl(x.data, self.exponent))
         else:
-            return float((self.const ** (1 / self.exponent) *
-                          _pnorm_default(x, self.exponent)))
+            return float(self.const ** (1 / self.exponent) *
+                         _pnorm_impl(x.data, self.exponent))

     def dist(self, x1, x2):
         """Return the weighted distance between ``x1`` and ``x2``.
@@ -2285,12 +2741,12 @@ def dist(self, x1, x2):
             The distance between the tensors.
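The ``exponent == float('inf')`` branches above intentionally drop the
weighting constant, in accordance with the limit of the weighted p-norm.
A small standalone check (numbers assumed)::

    import numpy as np

    x = np.array([1.0, -3.0, 2.0])
    const = 1.5
    for p in [2.0, 8.0, 32.0, 128.0]:
        # Weighted p-norm with constant weight; approaches max(|x|) = 3.0
        # since const ** (1 / p) -> 1 as p -> inf
        print(p, const ** (1 / p) * np.linalg.norm(x, ord=p))
    print('inf', np.max(np.abs(x)))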
""" if self.exponent == 2.0: - return float(np.sqrt(self.const) * _norm_default(x1 - x2)) + return float(np.sqrt(self.const) * _norm_impl((x1 - x2).data)) elif self.exponent == float('inf'): - return float(self.const * _pnorm_default(x1 - x2, self.exponent)) + return float(_pnorm_impl((x1 - x2).data, self.exponent)) else: return float((self.const ** (1 / self.exponent) * - _pnorm_default(x1 - x2, self.exponent))) + _pnorm_impl((x1 - x2).data, self.exponent))) class NumpyTensorSpaceCustomInner(CustomInner): diff --git a/odl/space/weighting.py b/odl/space/weighting.py index 8fcabbf50d8..14f68228e81 100644 --- a/odl/space/weighting.py +++ b/odl/space/weighting.py @@ -13,11 +13,12 @@ import numpy as np from odl.space.base_tensors import TensorSpace -from odl.util import array_str, signature_string, indent +from odl.util import ( + array_str, signature_string, indent, fast_1d_tensor_mult, writable_array) __all__ = ('MatrixWeighting', 'ArrayWeighting', 'ConstWeighting', - 'CustomInner', 'CustomNorm', 'CustomDist') + 'CustomInner', 'CustomNorm', 'CustomDist', 'adjoint_weightings') class Weighting(object): @@ -84,20 +85,6 @@ def __hash__(self): """Return ``hash(self)``.""" return hash((type(self), self.impl, self.exponent)) - def equiv(self, other): - """Test if ``other`` is an equivalent weighting. - - Should be overridden, default tests for equality. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance which - yields the same result as this inner product for any - input, ``False`` otherwise. - """ - return self == other - def inner(self, x1, x2): """Return the inner product of two elements. @@ -359,69 +346,6 @@ def __hash__(self): return hash((super(MatrixWeighting, self).__hash__(), self.matrix.tobytes())) - def equiv(self, other): - """Test if other is an equivalent weighting. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance with the same - `Weighting.impl`, which yields the same result as this - weighting for any input, ``False`` otherwise. This is checked - by entry-wise comparison of matrices/arrays/constants. 
- """ - # Lazy import to improve `import odl` time - import scipy.sparse - - # Optimization for equality - if self == other: - return True - - elif self.exponent != getattr(other, 'exponent', -1): - return False - - elif isinstance(other, MatrixWeighting): - if self.matrix.shape != other.matrix.shape: - return False - - if scipy.sparse.isspmatrix(self.matrix): - if other.matrix_issparse: - # Optimization for different number of nonzero elements - if self.matrix.nnz != other.matrix.nnz: - return False - else: - # Most efficient out-of-the-box comparison - return (self.matrix != other.matrix).nnz == 0 - else: # Worst case: compare against dense matrix - return np.array_equal(self.matrix.todense(), other.matrix) - - else: # matrix of `self` is dense - if other.matrix_issparse: - return np.array_equal(self.matrix, other.matrix.todense()) - else: - return np.array_equal(self.matrix, other.matrix) - - elif isinstance(other, ArrayWeighting): - if scipy.sparse.isspmatrix(self.matrix): - return (np.array_equiv(self.matrix.diagonal(), - other.array) and - np.array_equal(self.matrix.asformat('dia').offsets, - np.array([0]))) - else: - return np.array_equal( - self.matrix, other.array * np.eye(self.matrix.shape[0])) - - elif isinstance(other, ConstWeighting): - if scipy.sparse.isspmatrix(self.matrix): - return (np.array_equiv(self.matrix.diagonal(), other.const) and - np.array_equal(self.matrix.asformat('dia').offsets, - np.array([0]))) - else: - return np.array_equal( - self.matrix, other.const * np.eye(self.matrix.shape[0])) - else: - return False - @property def repr_part(self): """Return a string usable in a space's ``__repr__`` method.""" @@ -527,30 +451,6 @@ def __hash__(self): return hash((super(ArrayWeighting, self).__hash__(), self.array.tobytes())) - def equiv(self, other): - """Return True if other is an equivalent weighting. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance with the same - `Weighting.impl`, which yields the same result as this - weighting for any input, ``False`` otherwise. This is checked - by entry-wise comparison of arrays/constants. - """ - # Optimization for equality - if self == other: - return True - elif (not isinstance(other, Weighting) or - self.exponent != other.exponent): - return False - elif isinstance(other, MatrixWeighting): - return other.equiv(self) - elif isinstance(other, ConstWeighting): - return np.array_equiv(self.array, other.const) - else: - return np.array_equal(self.array, other.array) - @property def repr_part(self): """String usable in a space's ``__repr__`` method.""" @@ -572,6 +472,40 @@ def __str__(self): return repr(self) +class PerAxisWeighting(Weighting): + + """Weighting of a space with one weight per axis.""" + + def __init__(self, factors, impl, exponent=2.0): + """Initialize a new instance. + + Parameters + ---------- + factors : sequence of `array-like` + Weighting factors, one per axis. The factors can be constants + or one-dimensional array-like objects. + impl : string + Specifier for the implementation backend. + exponent : positive float, optional + Exponent of the norm. For values other than 2.0, the inner + product is not defined. 
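A factor sequence combines into an effective nD weight as an outer product,
one factor per axis. A short NumPy illustration (shapes assumed) for a
``(3, 4)``-shaped space::

    import numpy as np

    factors = [2.0, np.array([1.0, 2.0, 3.0, 4.0])]  # scalar + 1D array

    # Effective weight tensor: outer product broadcast to shape (3, 4)
    weight = np.multiply.outer(np.full(3, factors[0]), factors[1])
    print(weight.shape)  # (3, 4)
    print(weight[0])     # [ 2.  4.  6.  8.]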
+        """
+        super(PerAxisWeighting, self).__init__(impl=impl, exponent=exponent)
+        try:
+            iter(factors)
+        except TypeError:
+            raise TypeError('`factors` {!r} is not a sequence'.format(factors))
+        self.__factors = tuple(factors)
+
+    @property
+    def factors(self):
+        """Tuple of weighting factors for inner product, norm and distance."""
+        return self.__factors
+
+    # No further methods implemented here since that will require some
+    # knowledge on the array type
+
+
 class ConstWeighting(Weighting):

     """Weighting of a space by a constant."""
@@ -622,24 +556,6 @@ def __hash__(self):
         """Return ``hash(self)``."""
         return hash((super(ConstWeighting, self).__hash__(), self.const))

-    def equiv(self, other):
-        """Test if other is an equivalent weighting.
-
-        Returns
-        -------
-        equivalent : bool
-            ``True`` if other is a `Weighting` instance with the same
-            `Weighting.impl`, which yields the same result as this
-            weighting for any input, ``False`` otherwise. This is checked
-            by entry-wise comparison of matrices/arrays/constants.
-        """
-        if isinstance(other, ConstWeighting):
-            return self == other
-        elif isinstance(other, (ArrayWeighting, MatrixWeighting)):
-            return other.equiv(self)
-        else:
-            return False
-
     @property
     def repr_part(self):
         """String usable in a space's ``__repr__`` method."""
@@ -871,6 +787,207 @@ def __repr__(self):
         return '{}({})'.format(self.__class__.__name__, inner_str)


+def adjoint_weightings(adj_domain, adj_range, extra_factor=1.0,
+                       merge_1d_arrs=False):
+    """Return functions that together implement correct adjoint weighting.
+
+    This function determines the (presumably) best strategy to merge
+    weights and distribute the constant, depending on the involved
+    weighting types and space sizes.
+
+    Generally, the domain weighting factors enter multiplicatively,
+    while the range weighting factors enter as reciprocals.
+
+    The optimization logic is thus as follows:
+
+    1. For both domain and range, constant factors, 1D arrays and nD arrays
+       are determined.
+    2. The constants are merged into ``const = dom_const / ran_const``.
+    3. If either domain or range has 1D weighting arrays, the constant
+       is merged into the smallest of those.
+       Otherwise, the constant is used as multiplicative factor in the
+       smaller of the two spaces.
+    4. If desired, 1D arrays are merged and used on the smaller space.
+    5. Two functions are returned that implement the optimized domain
+       and range weightings. They have ``out`` parameters that let them
+       operate in-place if necessary.
+
+    Parameters
+    ----------
+    adj_domain, adj_range : `TensorSpace`
+        Domain and range of an adjoint operator for which the weighting
+        strategy should be determined.
+    extra_factor : float, optional
+        Multiply the determined weighting constant by this value. This is for
+        situations where additional scalar corrections are necessary.
+    merge_1d_arrs : bool, optional
+        If ``True``, try to merge the 1D arrays in axes where they occur
+        in both domain and range. This is intended as an optimization for
+        operators that have identical (or compatible) domain and range,
+        at least in the axes in question.
+
+    Returns
+    -------
+    apply_dom_weighting, apply_ran_weighting : function
+        Python functions with signature ``apply(x, out=None)`` that
+        implement the optimized strategy for domain and range, returning
+        a Numpy array. If a function does not do anything, it returns its
+        input ``x`` as Numpy array without making a copy.
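Schematically, the returned function pair is used as in the back-projector
bindings further below; a sketch with placeholder spaces::

    import numpy as np
    import odl
    from odl.space.weighting import adjoint_weightings

    proj_space = odl.uniform_discr([0, -1], [np.pi, 1], (180, 100))
    reco_space = odl.uniform_discr([-1, -1], [1, 1], (100, 100))

    apply_dom, apply_ran = adjoint_weightings(proj_space, reco_space)

    proj_arr = apply_dom(proj_space.one())  # weighted input, Numpy array
    out = reco_space.element()
    # ... run the unweighted backend on `proj_arr`, writing into `out` ...
    apply_ran(out, out=out)                 # weight the result in-place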
+    """
+    dom_w = adj_domain.weighting
+    ran_w = adj_range.weighting
+    dom_size = adj_domain.size
+    ran_size = adj_range.size
+    const = extra_factor
+
+    # Get relevant factors from the weightings
+
+    # Domain weightings go in as multiplicative factors
+    if isinstance(dom_w, ArrayWeighting):
+        # No constant contribution
+        dom_ndarr = dom_w.array
+        dom_1darrs = []
+        dom_1darr_axes = []
+    elif isinstance(dom_w, PerAxisWeighting):
+        const *= np.prod(dom_w.consts)
+        dom_ndarr = None
+        dom_1darrs = list(dom_w.arrays)
+        dom_1darr_axes = list(dom_w.array_axes)
+    elif isinstance(dom_w, ConstWeighting):
+        const *= dom_w.const
+        dom_ndarr = None
+        dom_1darrs = []
+        dom_1darr_axes = []
+    else:
+        raise TypeError('weighting type {} of the adjoint domain not '
+                        'supported'.format(type(dom_w)))
+
+    # Range weightings go in as reciprocal factors. For the later merging
+    # of constants and arrays we proceed with the reciprocal factors.
+    if isinstance(ran_w, ArrayWeighting):
+        # No constant contribution
+        ran_ndarr = ran_w.array
+        ran_1darrs_recip = []
+        ran_1darr_axes = []
+    elif isinstance(ran_w, PerAxisWeighting):
+        const /= np.prod(ran_w.consts)
+        ran_ndarr = None
+        ran_1darrs_recip = [1 / arr for arr in ran_w.arrays]
+        ran_1darr_axes = list(ran_w.array_axes)
+    elif isinstance(ran_w, ConstWeighting):
+        const /= ran_w.const
+        ran_ndarr = None
+        ran_1darrs_recip = []
+        ran_1darr_axes = []
+    else:
+        raise TypeError('weighting type {} of the adjoint range not '
+                        'supported'.format(type(ran_w)))
+
+    # Put constant where it "hurts least", i.e., to the smaller space if
+    # both use constant weighting or if an array weighting is involved;
+    # if 1d arrays are in play, multiply the constant with one of them.
+    if const == 1.0:
+        dom_const = ran_const = 1.0
+    elif dom_1darrs != []:
+        idx_smallest = np.argmin([len(arr) for arr in dom_1darrs])
+        dom_1darrs[idx_smallest] = dom_1darrs[idx_smallest] * const
+        dom_const = ran_const = 1.0
+    elif ran_1darrs_recip != []:
+        idx_smallest = np.argmin([len(arr) for arr in ran_1darrs_recip])
+        ran_1darrs_recip[idx_smallest] *= const  # has been copied already
+        dom_const = ran_const = 1.0
+    else:
+        if dom_size < ran_size:
+            dom_const = const
+            ran_const = 1.0
+        else:
+            dom_const = 1.0
+            ran_const = const
+
+    # If desired, merge 1d arrays; this works if domain and range have the
+    # same shape
+    if merge_1d_arrs:
+        if adj_domain.ndim != adj_range.ndim:
+            raise ValueError('merging only works with domain and range of '
+                             'the same number of dimensions')

+        for i in range(adj_domain.ndim):
+            if i in dom_1darr_axes and i in ran_1darr_axes:
+                # Look up list positions; the lists only hold entries for
+                # axes with array factors, so axis `i` cannot be used to
+                # index them directly
+                dom_i = dom_1darr_axes.index(i)
+                ran_i = ran_1darr_axes.index(i)
+                if dom_size < ran_size:
+                    dom_1darrs[dom_i] = (dom_1darrs[dom_i] *
+                                         ran_1darrs_recip[ran_i])
+                    ran_1darrs_recip.pop(ran_i)
+                    ran_1darr_axes.remove(i)
+                else:
+                    ran_1darrs_recip[ran_i] *= dom_1darrs[dom_i]  # copied
+                    dom_1darrs.pop(dom_i)
+                    dom_1darr_axes.remove(i)
+
+    # Define weighting functions and return them
+    def apply_dom_weighting(x, out=None):
+        inp = x
+        if dom_ndarr is not None:
+            # TODO: use first variant when Numpy 1.13 is minimum
+            if isinstance(x, np.ndarray):
+                out = np.multiply(x, dom_ndarr, out=out)
+            else:
+                out = x.ufuncs.multiply(dom_ndarr, out=out)
+            inp = out
+        if dom_const != 1.0:
+            if isinstance(inp, np.ndarray):
+                out = np.multiply(inp, dom_const, out=out)
+            else:
+                out = inp.ufuncs.multiply(dom_const, out=out)
+            inp = out
+        if dom_1darrs != []:
+            if out is None:
+                out = fast_1d_tensor_mult(inp, dom_1darrs, dom_1darr_axes)
+            else:
+                # TODO: use impl when available
+                with writable_array(out) as out_arr:
+                    fast_1d_tensor_mult(inp,
dom_1darrs, dom_1darr_axes,
+                                        out=out_arr)
+
+        if out is None:
+            # Nothing has been done, return input as Numpy array. This will
+            # not copy.
+            return np.asarray(x)
+        else:
+            return np.asarray(out)
+
+    def apply_ran_weighting(x, out=None):
+        inp = x
+        if ran_ndarr is not None:
+            # TODO: use first variant when Numpy 1.13 is minimum
+            if isinstance(x, np.ndarray):
+                out = np.divide(x, ran_ndarr, out=out)
+            else:
+                out = x.ufuncs.divide(ran_ndarr, out=out)
+            inp = out
+        if ran_const != 1.0:
+            if isinstance(inp, np.ndarray):
+                out = np.multiply(inp, ran_const, out=out)
+            else:
+                out = inp.ufuncs.multiply(ran_const, out=out)
+            inp = out
+        if ran_1darrs_recip != []:
+            if out is None:
+                out = fast_1d_tensor_mult(inp, ran_1darrs_recip,
+                                          ran_1darr_axes)
+            else:
+                with writable_array(out) as out_arr:
+                    fast_1d_tensor_mult(inp, ran_1darrs_recip, ran_1darr_axes,
+                                        out=out_arr)
+
+        if out is None:
+            # Nothing has been done, return input as Numpy array. This will
+            # not copy.
+            return np.asarray(x)
+        else:
+            return np.asarray(out)
+
+    return apply_dom_weighting, apply_ran_weighting
+
+
 if __name__ == '__main__':
     from odl.util.testutils import run_doctests
     run_doctests()
diff --git a/odl/test/discr/lp_discr_test.py b/odl/test/discr/lp_discr_test.py
index ddeb3f4a45a..a261d5e942c 100644
--- a/odl/test/discr/lp_discr_test.py
+++ b/odl/test/discr/lp_discr_test.py
@@ -15,7 +15,7 @@
 from odl.discr.lp_discr import DiscreteLp, DiscreteLpElement
 from odl.space.base_tensors import TensorSpace
 from odl.space.npy_tensors import NumpyTensor
-from odl.space.weighting import ConstWeighting
+from odl.space.weighting import PerAxisWeighting
 from odl.util.testutils import (
     almost_equal, all_equal, all_almost_equal, noise_elements,
     simple_fixture)
@@ -30,6 +30,9 @@
 power = simple_fixture('power', [1.0, 2.0, 0.5, -0.5, -1.0, -2.0])
 shape = simple_fixture('shape', [(2, 3, 4), (3, 4), (2,), (1,), (1, 1, 1)])
 power = simple_fixture('power', [1.0, 2.0, 0.5, -0.5, -1.0, -2.0])
+shaped_dtype = simple_fixture(
+    'shaped_dtype',
+    [('float', ()), ('float32', (3,)), ('float', (2, 3))])


 # --- DiscreteLp --- #
@@ -86,28 +89,10 @@ def test_discretelp_init():
         DiscreteLp(fspace, part_diffshp, tspace)  # shape mismatch


-def test_empty():
-    """Check if empty spaces behave as expected and all methods work."""
-    discr = odl.uniform_discr([], [], ())
-
-    assert discr.interp == 'nearest'
-    assert discr.axis_labels == ()
-    assert discr.tangent_bundle == odl.ProductSpace(field=odl.RealNumbers())
-    assert discr.complex_space == odl.uniform_discr([], [], (), dtype=complex)
-    hash(discr)
-    assert repr(discr) != ''
-
-    elem = discr.element(1.0)
-    assert np.array_equal(elem.asarray(), 1.0)
-    assert np.array_equal(elem.real, 1.0)
-    assert np.array_equal(elem.imag, 0.0)
-    assert np.array_equal(elem.conj(), 1.0)
-
-
 # --- uniform_discr --- #


-def test_factory_dtypes(tspace_impl):
+def test_uniform_discr_dtypes(tspace_impl):
     real_float_dtypes = [np.float32, np.float64]
     nonfloat_dtypes = [np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64]
@@ -189,6 +174,66 @@ def test_uniform_discr_init_complex(tspace_impl):
     assert discr.dtype == discr.tspace.default_dtype(odl.ComplexNumbers())


+def test_uniform_discr_empty():
+    """Check if empty spaces behave as expected and all methods work."""
+    discr = odl.uniform_discr([], [], ())
+
+    assert discr.interp == 'nearest'
+    assert discr.axis_labels == ()
+    assert discr.tangent_bundle == odl.ProductSpace(field=odl.RealNumbers())
+    assert discr.complex_space == odl.uniform_discr([], [], (),
dtype=complex) + assert hash(discr) == hash(discr) + assert repr(discr) != '' + + elem = discr.element(1.0) + assert np.array_equal(elem.asarray(), 1.0) + assert np.array_equal(elem.real, 1.0) + assert np.array_equal(elem.imag, 0.0) + assert np.array_equal(elem.conj(), 1.0) + + +def test_uniform_discr_shaped_dtypes(shaped_dtype): + """Check handling of dtypes with shape in uniform_discr.""" + discr = odl.uniform_discr([0, 0], [1, 1], shape=(3, 4), dtype=shaped_dtype) + + # Check space shape + shaped_dtype = np.dtype(shaped_dtype) + assert discr.dtype == shaped_dtype + assert discr.scalar_dtype == shaped_dtype.base + assert discr.shape == shaped_dtype.shape + (3, 4) + + # Create new element + x = discr.zero() + assert x.shape == shaped_dtype.shape + (3, 4) + assert x in discr + assert x.tensor in discr.tspace + + # Create element from array + x = discr.element(np.ones(shaped_dtype.shape + (3, 4))) + assert x in discr + + # Create element from function + def f_scal(x): + return x[0] ** 2 + 1 + + def f_vec(x): + return [x[1] + 2, 1, x[0] + x[1]] + + def f_tens(x): + return [[0, x[0], x[1]], + [x[0] - x[1] ** 2, 1, x[1]]] + + if shaped_dtype.shape == (): + x = discr.element(f_scal) + elif shaped_dtype.shape == (3,): + x = discr.element(f_vec) + elif shaped_dtype.shape == (2, 3): + x = discr.element(f_tens) + else: + assert False + + assert x in discr + # --- DiscreteLp methods --- # @@ -205,7 +250,7 @@ def test_discretelp_element(): # 2D discr = odl.uniform_discr([0, 0], [1, 1], (3, 3)) - weight = 1.0 if exponent == float('inf') else discr.cell_volume + weight = 1.0 if exponent == float('inf') else discr.cell_sides tspace = odl.rn((3, 3), weighting=weight) elem = discr.element() assert elem in discr @@ -540,13 +585,13 @@ def test_getslice(): discr = odl.uniform_discr(0, 1, 3) elem = discr.element([1, 2, 3]) - assert isinstance(elem[:], NumpyTensor) + assert isinstance(elem[:], DiscreteLpElement) assert all_equal(elem[:], [1, 2, 3]) discr = odl.uniform_discr(0, 1, 3, dtype='complex') elem = discr.element([1 + 2j, 2 - 2j, 3]) - assert isinstance(elem[:], NumpyTensor) + assert isinstance(elem[:], DiscreteLpElement) assert all_equal(elem[:], [1 + 2j, 2 - 2j, 3]) @@ -625,15 +670,15 @@ def test_setitem_nd(): elem[:] = arr.T # bad shape (2, 3) # nD - shape = (3,) * 3 + (4,) * 3 - discr = odl.uniform_discr([0] * 6, [1] * 6, shape) + shape = (3, 4, 5) + discr = odl.uniform_discr([0] * 3, [1] * 3, shape) size = np.prod(shape) elem = discr.element(np.zeros(shape)) arr = np.arange(size).reshape(shape) elem[:] = arr - assert all_equal(elem, arr) + assert np.array_equal(elem, arr) elem[:] = 0 assert all_equal(elem, np.zeros(elem.shape)) @@ -1031,13 +1076,13 @@ def test_ufunc_corner_cases(tspace_impl): res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - # Constant weighting should be preserved (recomputed from cell - # volume) + # Per-axis weighting should be sliced accordingly y = space.one() res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert res.space.weighting.const == pytest.approx(space.cell_sides[1]) + assert isinstance(res.space.weighting, PerAxisWeighting) + assert res.space.weighting.factors == pytest.approx([space.cell_sides[1]]) - # Check that `exponent` is propagated + # Check that `exponent` is being propagated space_1 = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=tspace_impl, exponent=1) z = space_1.one() @@ -1047,21 +1092,42 @@ def test_ufunc_corner_cases(tspace_impl): # --- `outer` --- # # Check that 
weightings are propagated correctly
+    # Same space -> concatenate factors
     x = y = space.one()
     res = x.__array_ufunc__(np.add, 'outer', x, y)
-    assert isinstance(res.space.weighting, ConstWeighting)
-    assert res.space.weighting.const == pytest.approx(x.space.weighting.const *
-                                                      y.space.weighting.const)
+    assert isinstance(res.space.weighting, PerAxisWeighting)
+    true_factors = x.space.weighting.factors * 2
+    assert res.space.weighting.factors == pytest.approx(true_factors)

+    # Other space 2D and const weighting != 1 -> const weighting 1
     x = space.one()
-    y = space_no_w.one()
+    y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=0.5).one()
     res = x.__array_ufunc__(np.add, 'outer', x, y)
-    assert isinstance(res.space.weighting, ConstWeighting)
-    assert res.space.weighting.const == pytest.approx(x.space.weighting.const)
+    assert not res.space.is_weighted

-    x = y = space_no_w.one()
+    # Other space 2D and const weighting == 1 -> concatenate with per-axis 1
+    x = space.one()
+    y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=1).one()
     res = x.__array_ufunc__(np.add, 'outer', x, y)
-    assert not res.space.is_weighted
+    assert isinstance(res.space.weighting, PerAxisWeighting)
+    true_factors = x.space.weighting.factors + (1, 1)
+    assert res.space.weighting.factors == pytest.approx(true_factors)
+
+    # Other space 2D and per-axis weighting -> concatenate
+    x = space.one()
+    y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=(0.5, 1.5)).one()
+    res = x.__array_ufunc__(np.add, 'outer', x, y)
+    assert isinstance(res.space.weighting, PerAxisWeighting)
+    true_factors = x.space.weighting.factors + y.space.weighting.factors
+    assert res.space.weighting.factors == pytest.approx(true_factors)
+
+    # Other space 1D and const weighting -> concatenate
+    x = space.one()
+    y = odl.uniform_discr(0, 1, 2, weighting=0.5).one()
+    res = x.__array_ufunc__(np.add, 'outer', x, y)
+    assert isinstance(res.space.weighting, PerAxisWeighting)
+    true_factors = x.space.weighting.factors + (y.space.weighting.const,)
+    assert res.space.weighting.factors == pytest.approx(true_factors)


 def test_real_imag(tspace_impl, elem_order):
diff --git a/odl/test/space/tensors_test.py b/odl/test/space/tensors_test.py
index 072614401a3..f8ddf218318 100644
--- a/odl/test/space/tensors_test.py
+++ b/odl/test/space/tensors_test.py
@@ -22,6 +22,7 @@
     NumpyTensorSpaceConstWeighting, NumpyTensorSpaceArrayWeighting,
     NumpyTensorSpaceCustomInner, NumpyTensorSpaceCustomNorm,
     NumpyTensorSpaceCustomDist)
+from odl.space.weighting import PerAxisWeighting
 from odl.util.testutils import (
     all_almost_equal, all_equal, simple_fixture,
     noise_array, noise_element, noise_elements)
@@ -182,7 +183,7 @@ def test_init_tspace_weighting(weight, exponent, tspace_impl):

     # Errors for bad input
     with pytest.raises(ValueError):
-        badly_sized = np.ones((2, 4))
+        badly_sized = np.ones((4, 4))
         odl.tensor_space((3, 4), weighting=badly_sized, impl=tspace_impl)

     if tspace_impl == 'numpy':
@@ -708,7 +709,6 @@ def test_element_getitem(tspace_impl, getitem_indices):
     assert sliced_spc.shape == sliced_shape
     assert sliced_spc.dtype == space.dtype
     assert sliced_spc.exponent == space.exponent
-    assert sliced_spc.weighting == space.weighting

     # Check that we have a view that manipulates the original array
     # (or not, depending on indexing style)
@@ -761,7 +761,9 @@ def test_element_getitem_bool_array(tspace_impl):
     assert sliced_spc.shape == x_arr_sliced.shape
     assert sliced_spc.dtype == space.dtype
     assert sliced_spc.exponent == space.exponent
-    assert sliced_spc.weighting ==
space.weighting
+
+
+# TODO: test for weight propagation


 def test_element_setitem_bool_array(tspace_impl):
@@ -1023,43 +1025,6 @@ def test_array_weighting_equals(tspace_impl):
     assert weighting_arr != weighting_other_exp


-def test_array_weighting_equiv(tspace_impl):
-    """Test the equiv method of Numpy array weightings."""
-    space = odl.rn(5, impl=tspace_impl)
-    weight_arr = _pos_array(space)
-    weight_elem = space.element(weight_arr)
-    different_arr = weight_arr + 1
-
-    arr_weighting_cls = _weighting_cls(tspace_impl, 'array')
-    w_arr = arr_weighting_cls(weight_arr)
-    w_elem = arr_weighting_cls(weight_elem)
-    w_different_arr = arr_weighting_cls(different_arr)
-
-    # Equal -> True
-    assert w_arr.equiv(w_arr)
-    assert w_arr.equiv(w_elem)
-    # Different array -> False
-    assert not w_arr.equiv(w_different_arr)
-
-    # Test shortcuts in the implementation
-    const_arr = np.ones(space.shape) * 1.5
-
-    const_weighting_cls = _weighting_cls(tspace_impl, 'const')
-    w_const_arr = arr_weighting_cls(const_arr)
-    w_const = const_weighting_cls(1.5)
-    w_wrong_const = const_weighting_cls(1)
-    w_wrong_exp = const_weighting_cls(1.5, exponent=1)
-
-    assert w_const_arr.equiv(w_const)
-    assert not w_const_arr.equiv(w_wrong_const)
-    assert not w_const_arr.equiv(w_wrong_exp)
-
-    # Bogus input
-    assert not w_const_arr.equiv(True)
-    assert not w_const_arr.equiv(object)
-    assert not w_const_arr.equiv(None)
-
-
 def test_array_weighting_inner(tspace):
     """Test inner product in a weighted space."""
     [xarr, yarr], [x, y] = noise_elements(tspace, 2)
@@ -1132,7 +1097,7 @@ def test_const_weighting_init(tspace_impl, exponent):


 def test_const_weighting_comparison(tspace_impl):
-    """Test equality to and equivalence with const weightings."""
+    """Test equality with const weightings."""
     constant = 1.5

     const_weighting_cls = _weighting_cls(tspace_impl, 'const')
@@ -1151,23 +1116,12 @@ def test_const_weighting_comparison(tspace_impl):
     assert w_const == w_const
     assert w_const == w_const2
     assert w_const2 == w_const
-    # Different but equivalent
-    assert w_const.equiv(w_const_arr)
     assert w_const != w_const_arr

-    # Not equivalent
-    assert not w_const.equiv(w_other_exp)
     assert w_const != w_other_exp
-    assert not w_const.equiv(w_other_const)
     assert w_const != w_other_const
-    assert not w_const.equiv(w_other_const_arr)
     assert w_const != w_other_const_arr

-    # Bogus input
-    assert not w_const.equiv(True)
-    assert not w_const.equiv(object)
-    assert not w_const.equiv(None)
-

 def test_const_weighting_inner(tspace):
     """Test inner product with const weighting."""
@@ -1191,7 +1145,7 @@ def test_const_weighting_norm(tspace, exponent):
     constant = 1.5

     if exponent == float('inf'):
-        factor = constant
+        factor = 1.0
     else:
         factor = constant ** (1 / exponent)
     true_norm = factor * np.linalg.norm(xarr.ravel(), ord=exponent)
@@ -1206,7 +1160,7 @@ def test_const_weighting_dist(tspace, exponent):
     constant = 1.5

     if exponent == float('inf'):
-        factor = constant
+        factor = 1.0
     else:
         factor = constant ** (1 / exponent)
     true_dist = factor * np.linalg.norm((xarr - yarr).ravel(), ord=exponent)
@@ -1567,7 +1521,7 @@ def test_ufunc_corner_cases(tspace_impl):
     res = x.__array_ufunc__(np.add, 'reduce', x, axis=0, out=(out_ax0,))
     assert all_almost_equal(out_ax0, np.add.reduce(x.asarray(), axis=0))
     assert res is out_ax0
-    out_ax1 = odl.rn(2, impl=tspace_impl).element()
+    out_ax1 = odl.rn(2).element()
     res = x.__array_ufunc__(np.add, 'reduce', x, axis=1, out=(out_ax1,))
     assert all_almost_equal(out_ax1, np.add.reduce(x.asarray(), axis=1))
     assert res is out_ax1
@@ -1576,17 +1530,14
@@ def test_ufunc_corner_cases(tspace_impl): res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - # Cannot propagate weightings in a meaningful way, check that there are - # none in the result - y = space_const_w.one() - res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted - y = space_arr_w.one() + # Per-axis weighting should be sliced accordingly + y = odl.rn((2, 3), weighting=(3, 4)).one() res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted + assert isinstance(res.space.weighting, PerAxisWeighting) + assert res.space.weighting.factors == pytest.approx([4]) - # Check that `exponent` is propagated - space_1 = odl.rn((2, 3), exponent=1) + # Check that `exponent` is being propagated + space_1 = odl.rn((2, 3), impl=tspace_impl, exponent=1) z = space_1.one() res = z.__array_ufunc__(np.add, 'reduce', z, axis=0) assert res.space.exponent == 1 diff --git a/odl/test/trafos/backends/pywt_bindings_test.py b/odl/test/trafos/backends/pywt_bindings_test.py index aeedd15e0f4..4549b52a661 100644 --- a/odl/test/trafos/backends/pywt_bindings_test.py +++ b/odl/test/trafos/backends/pywt_bindings_test.py @@ -7,6 +7,7 @@ # obtain one at https://mozilla.org/MPL/2.0/. from __future__ import division +from numbers import Integral import pytest try: import pywt @@ -111,14 +112,17 @@ def _grouped_and_flat_arrays(shapes, dtype): i.e. the array with shape ``shapes[0]`` appears once, while the others appear ``2 ** ndim - 1`` times each. """ - space = odl.discr_sequence_space(shape=shapes[0], dtype=dtype) + shapes = [[shape] if isinstance(shape, Integral) else shape + for shape in shapes] + space = odl.uniform_discr([0] * len(shapes[0]), shapes[0], shapes[0], + dtype=dtype) array = noise_array(space).reshape(space.shape) grouped_list = [array] flat_list = [array.ravel()] ndim = space.ndim for shape in shapes[1:]: - space = odl.discr_sequence_space(shape=shape, dtype=dtype) + space = odl.uniform_discr([0] * len(shape), shape, shape, dtype=dtype) arrays = [noise_array(space).reshape(shape) for _ in range(2 ** ndim - 1)] grouped_list.append(tuple(arrays)) diff --git a/odl/test/trafos/fourier_test.py b/odl/test/trafos/fourier_test.py index 5850d088df9..cf924c9417b 100644 --- a/odl/test/trafos/fourier_test.py +++ b/odl/test/trafos/fourier_test.py @@ -53,12 +53,12 @@ def sinc(x): def test_dft_init(impl): # Just check if the code runs at all shape = (4, 5) - dom = odl.discr_sequence_space(shape) + dom = odl.uniform_discr([0, 0], shape, shape) dom_nonseq = odl.uniform_discr([0, 0], [1, 1], shape) - dom_f32 = odl.discr_sequence_space(shape, dtype='float32') - ran = odl.discr_sequence_space(shape, dtype='complex128') - ran_c64 = odl.discr_sequence_space(shape, dtype='complex64') - ran_hc = odl.discr_sequence_space((3, 5), dtype='complex128') + dom_f32 = odl.uniform_discr([0, 0], shape, shape, dtype='float32') + ran = odl.uniform_discr([0, 0], shape, shape, dtype=complex) + ran_c64 = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') + ran_hc = odl.uniform_discr([0, 0], [2, 4], (3, 5), dtype=complex) # Implicit range DiscreteFourierTransform(dom, impl=impl) @@ -82,8 +82,8 @@ def test_dft_init(impl): def test_dft_init_raise(): # Test different error scenarios shape = (4, 5) - dom = odl.discr_sequence_space(shape) - dom_f32 = odl.discr_sequence_space(shape, dtype='float32') + dom = odl.uniform_discr([0, 0], shape, shape) + dom_f32 = odl.uniform_discr([0, 0], shape, shape, 
dtype='float32') # Bad types with pytest.raises(TypeError): @@ -103,36 +103,36 @@ def test_dft_init_raise(): DiscreteFourierTransform(dom, axes=(1, -3)) # Badly shaped range - bad_ran = odl.discr_sequence_space((3, 5), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (3, 5), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((10, 10), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (10, 10), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((4, 5), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True) - bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 3), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True, axes=(0,)) # Bad data types - bad_ran = odl.discr_sequence_space(shape, dtype='complex64') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='complex64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space(shape, dtype='float64') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='float64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((4, 3), dtype='float64') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 3), dtype='float64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True) - bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 3), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom_f32, bad_ran, halfcomplex=True) @@ -144,25 +144,24 @@ def test_dft_init_raise(): def test_dft_range(): # 1d shape = 10 - dom = odl.discr_sequence_space(shape, dtype='complex128') + dom = odl.uniform_discr(0, shape, shape, dtype='complex128') fft = DiscreteFourierTransform(dom) - true_ran = odl.discr_sequence_space(shape, dtype='complex128') - assert fft.range == true_ran + assert fft.range == dom # 3d shape = (3, 4, 5) - ran = odl.discr_sequence_space(shape, dtype='complex64') - fft = DiscreteFourierTransform(ran) - true_ran = odl.discr_sequence_space(shape, dtype='complex64') - assert fft.range == true_ran + dom = odl.uniform_discr([0, 0, 0], shape, shape, dtype='complex64') + fft = DiscreteFourierTransform(dom) + assert fft.range == dom # 3d, with axes and halfcomplex shape = (3, 4, 5) axes = (-1, -2) ran_shape = (3, 3, 5) - dom = odl.discr_sequence_space(shape, dtype='float32') + dom = odl.uniform_discr([0, 0, 0], shape, shape, dtype='float32') fft = DiscreteFourierTransform(dom, axes=axes, halfcomplex=True) - true_ran = odl.discr_sequence_space(ran_shape, dtype='complex64') + true_ran = odl.uniform_discr([0, 0, 0], ran_shape, ran_shape, + dtype='complex64') assert fft.range == true_ran @@ -173,10 +172,10 @@ def test_idft_init(impl): # Just check if the code runs at all; this uses the init function of # DiscreteFourierTransform, so we don't need exhaustive tests here shape = (4, 5) - ran = odl.discr_sequence_space(shape, dtype='complex128') - ran_hc = odl.discr_sequence_space(shape, dtype='float64') - dom = odl.discr_sequence_space(shape, dtype='complex128') - dom_hc = odl.discr_sequence_space((3, 5), 
dtype='complex128') + ran = odl.uniform_discr([0, 0], shape, shape, dtype='complex128') + ran_hc = odl.uniform_discr([0, 0], shape, shape, dtype='float64') + dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex128') + dom_hc = odl.uniform_discr([0, 0], [3, 5], (3, 5), dtype='complex128') # Implicit range DiscreteFourierTransformInverse(dom, impl=impl) @@ -191,7 +190,7 @@ def test_dft_call(impl): # 2d, complex, all ones and random back & forth shape = (4, 5) - dft_dom = odl.discr_sequence_space(shape, dtype='complex64') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') dft = DiscreteFourierTransform(domain=dft_dom, impl=impl) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl) @@ -225,7 +224,7 @@ def test_dft_call(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = 0 - dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') dft = DiscreteFourierTransform(domain=dft_dom, impl=impl, halfcomplex=True, axes=axes) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl, @@ -258,7 +257,7 @@ def test_dft_sign(impl): # 2d, complex, all ones and random back & forth shape = (4, 5) - dft_dom = odl.discr_sequence_space(shape, dtype='complex64') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') dft_minus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='-') dft_plus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='+') @@ -279,7 +278,7 @@ def test_dft_sign(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = (0,) - dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') arr = dft_dom.element([[0, 0, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 1, 1, 0], @@ -303,7 +302,7 @@ def test_dft_init_plan(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = 0 - dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') dft = DiscreteFourierTransform(dft_dom, impl=impl, axes=axes, halfcomplex=True) diff --git a/odl/test/util/numerics_test.py b/odl/test/util/numerics_test.py index 231425e507b..edf47a70351 100644 --- a/odl/test/util/numerics_test.py +++ b/odl/test/util/numerics_test.py @@ -234,6 +234,11 @@ def simple_mult_3(x, y, z): fast_1d_tensor_mult(test_arr, [x, y, z], out=out) assert all_equal(out, true_result) + # Empty array sequence results in no-op + test_arr = np.ones(shape) + out = fast_1d_tensor_mult(test_arr, []) + assert all_equal(out, test_arr) + # Different orderings test_arr = np.ones(shape) out = fast_1d_tensor_mult(test_arr, [y, x, z], axes=(1, 0, 2)) @@ -290,10 +295,6 @@ def test_fast_1d_tensor_mult_error(): with pytest.raises(TypeError): fast_1d_tensor_mult([[0, 0], [0, 0]], [x, x]) - # No 1d arrays given - with pytest.raises(ValueError): - fast_1d_tensor_mult(test_arr, []) - # Length or dimension mismatch with pytest.raises(ValueError): fast_1d_tensor_mult(test_arr, [x, y]) diff --git a/odl/tomo/backends/astra_cpu.py b/odl/tomo/backends/astra_cpu.py index 3e437eb10c5..a7e307ff272 100644 --- a/odl/tomo/backends/astra_cpu.py +++ b/odl/tomo/backends/astra_cpu.py @@ -16,6 +16,7 @@ pass from odl.discr import DiscreteLp, DiscreteLpElement +from odl.space.weighting import adjoint_weightings from odl.tomo.backends.astra_setup import ( astra_projection_geometry, astra_volume_geometry, astra_data, astra_projector, astra_algorithm) @@ -121,11 +122,11 @@ def astra_cpu_back_projector(proj_data, 
geometry, reco_space, out=None): Parameters ---------- proj_data : `DiscreteLpElement` - Projection data to which the back-projector is applied + Projection data to which the back-projector is applied. geometry : `Geometry` - Geometry defining the tomographic setup + Geometry defining the tomographic setup. reco_space : `DiscreteLp` - Space to which the calling operator maps + Space to which the calling operator maps. out : ``reco_space`` element, optional Element of the reconstruction space to which the result is written. If ``None``, an element in ``reco_space`` is created. @@ -170,9 +171,20 @@ def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None): vol_geom = astra_volume_geometry(reco_space) proj_geom = astra_projection_geometry(geometry) + # Get adjoint weighting functions + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + proj_data.space, reco_space) + + # Weight data out-of-place. If no weighting is applied, the returned + # element is aligned with the input. + proj_data_weighted = apply_dom_weighting(proj_data) + # If a copy was made, it's already contiguous and no copy should be + # needed. Otherwise, a copy may be required. + allow_copy = (proj_data_weighted is proj_data) + # Create ASTRA data structure - sino_id = astra_data(proj_geom, datatype='projection', data=proj_data, - allow_copy=True) + sino_id = astra_data(proj_geom, datatype='projection', + data=proj_data_weighted, allow_copy=allow_copy) # Create projector # TODO: implement with different schemes for angles and detector @@ -196,11 +208,8 @@ def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None): # Run algorithm astra.algorithm.run(algo_id) - # Weight the adjoint by appropriate weights - scaling_factor = float(proj_data.space.weighting.const) - scaling_factor /= float(reco_space.weighting.const) - - out *= scaling_factor + # Weight output in-place + apply_ran_weighting(out, out=out) # Delete ASTRA objects astra.algorithm.delete(algo_id) diff --git a/odl/tomo/backends/astra_cuda.py b/odl/tomo/backends/astra_cuda.py index c817f508278..c50f0b065ca 100644 --- a/odl/tomo/backends/astra_cuda.py +++ b/odl/tomo/backends/astra_cuda.py @@ -18,6 +18,7 @@ ASTRA_CUDA_AVAILABLE = False from odl.discr import DiscreteLp +from odl.space.weighting import adjoint_weightings from odl.tomo.backends.astra_setup import ( ASTRA_VERSION, astra_projection_geometry, astra_volume_geometry, astra_projector, @@ -246,25 +247,36 @@ def call_backward(self, proj_data, out=None): else: out = self.reco_space.element() - # Copy data to GPU memory + # Get adjoint weighting functions, using an extra correction + # factor accounting for inconsistencies in certain ASTRA versions + extra_factor = astra_cuda_bp_scaling_factor( + self.proj_space, self.reco_space, self.geometry) + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + self.proj_space, self.reco_space, extra_factor) + + # Copy weighted data to GPU memory if self.geometry.ndim == 2: - astra.data2d.store(self.sino_id, proj_data.asarray()) + proj_data_arr = apply_dom_weighting(proj_data) + astra.data2d.store(self.sino_id, proj_data_arr) elif self.geometry.ndim == 3: + # Flatten along angles shape = (-1,) + self.geometry.det_partition.shape reshaped_proj_data = proj_data.asarray().reshape(shape) + # Swap angle axis to the middle, always making a copy swapped_proj_data = np.ascontiguousarray( np.swapaxes(reshaped_proj_data, 0, 1)) + # In-place since input is not aliased with `proj_data` + apply_dom_weighting(swapped_proj_data, 
out=swapped_proj_data) astra.data3d.store(self.sino_id, swapped_proj_data) # Run algorithm astra.algorithm.run(self.algo_id) - # Copy result to CPU memory + # Get result from cache out[:] = self.out_array - # Fix scaling to weight by pixel/voxel size - out *= astra_cuda_bp_scaling_factor( - self.proj_space, self.reco_space, self.geometry) + # Apply reco space weighting + apply_ran_weighting(out, out=out) return out @@ -362,12 +374,9 @@ def astra_cuda_bp_scaling_factor(proj_space, reco_space, geometry): # Correct in case of non-weighted spaces proj_extent = float(proj_space.partition.extent.prod()) proj_size = float(proj_space.partition.size) - proj_weighting = proj_extent / proj_size + proj_cell_volume = proj_extent / proj_size - scaling_factor *= (proj_space.weighting.const / - proj_weighting) - scaling_factor /= (reco_space.weighting.const / - reco_space.cell_volume) + scaling_factor *= reco_space.cell_volume / proj_cell_volume if parse_version(ASTRA_VERSION) < parse_version('1.8rc1'): if isinstance(geometry, Parallel2dGeometry): diff --git a/odl/tomo/backends/skimage_radon.py b/odl/tomo/backends/skimage_radon.py index a39a5dac2eb..6badb87dd86 100644 --- a/odl/tomo/backends/skimage_radon.py +++ b/odl/tomo/backends/skimage_radon.py @@ -17,6 +17,7 @@ SKIMAGE_AVAILABLE = False from odl.discr import uniform_discr_frompartition, uniform_partition +from odl.space.weighting import adjoint_weightings __all__ = ('skimage_radon_forward', 'skimage_radon_back_projector', 'SKIMAGE_AVAILABLE') @@ -56,8 +57,8 @@ def clamped_interpolation(skimage_range, sinogram): def interpolation_wrapper(x): x = (x[0], np.maximum(min_x, np.minimum(max_x, x[1]))) - return sinogram.interpolation(x) + return interpolation_wrapper @@ -140,25 +141,25 @@ def skimage_radon_back_projector(sinogram, geometry, range, out=None): # Only do asserts here since these are backend functions assert out in range - # Rotate back from (rows, cols) to (x, y) - backproj = iradon(skimage_sinogram.asarray().T, theta, - output_size=range.shape[0], filter=None, circle=False) - out[:] = np.rot90(backproj, -1) - - # Empirically determined value, gives correct scaling + # Extra weighting factor needed for skimage-internal reasons scaling_factor = 4.0 * float(geometry.motion_params.length) / (2 * np.pi) + proj_cell_volume = (sinogram.space.partition.extent.prod() / + sinogram.space.partition.size) + scaling_factor *= range.cell_volume / proj_cell_volume + + # Get weighting functions + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + sinogram.space, range, extra_factor=scaling_factor) - # Correct in case of non-weighted spaces - proj_extent = float(sinogram.space.partition.extent.prod()) - proj_size = float(sinogram.space.partition.size) - proj_weighting = proj_extent / proj_size + # Scale sinogram if necessary + sinogram = apply_dom_weighting(sinogram) - scaling_factor *= (sinogram.space.weighting.const / - proj_weighting) - scaling_factor /= (range.weighting.const / - range.cell_volume) + # Apply backprojection and rotate back from (rows, cols) to (x, y) + backproj = iradon(skimage_sinogram.asarray().T, theta, + output_size=range.shape[0], filter=None, circle=False) + out[:] = np.rot90(backproj, -1) - # Correctly scale the output - out *= scaling_factor + # Scale output + apply_ran_weighting(out, out=out) return out diff --git a/odl/tomo/operators/ray_trafo.py b/odl/tomo/operators/ray_trafo.py index 84a5a46bf69..3b824b505c2 100644 --- a/odl/tomo/operators/ray_trafo.py +++ b/odl/tomo/operators/ray_trafo.py @@ -17,7 +17,7 @@ from 
odl.space import FunctionSpace
 from odl.tomo.geometry import (
     Geometry, Parallel2dGeometry, Parallel3dAxisGeometry)
-from odl.space.weighting import ConstWeighting
+from odl.space.weighting import PerAxisWeighting
 from odl.tomo.backends import (
     ASTRA_AVAILABLE, ASTRA_CUDA_AVAILABLE, SKIMAGE_AVAILABLE,
     astra_supports, ASTRA_VERSION,
@@ -198,27 +198,11 @@ def __init__(self, reco_space, geometry, variant, **kwargs):
         if proj_space is None:
             dtype = reco_space.dtype
             proj_fspace = FunctionSpace(geometry.params, out_dtype=dtype)
-
-            if not reco_space.is_weighted:
-                weighting = None
-            elif (isinstance(reco_space.weighting, ConstWeighting) and
-                  np.isclose(reco_space.weighting.const,
-                             reco_space.cell_volume)):
-                # Approximate cell volume
-                # TODO: find a way to treat angles and detector differently
-                # regarding weighting. While the detector should be uniformly
-                # discretized, the angles do not have to and often are not.
-                # The needed partition property is available since
-                # commit a551190d, but weighting is not adapted yet.
-                # See also issue #286
-                extent = float(geometry.partition.extent.prod())
-                size = float(geometry.partition.size)
-                weighting = extent / size
-            else:
-                raise NotImplementedError('unknown weighting of domain')
-
+            # TODO: handle uniform angles with modified endpoints
+            # (scale projections in backproj)
+            proj_weighting = proj_space_weighting(reco_space, geometry)
             proj_tspace = reco_space.tspace_type(geometry.partition.shape,
-                                                 weighting=weighting,
+                                                 weighting=proj_weighting,
                                                  dtype=dtype)

             if geometry.motion_partition.ndim == 0:
@@ -528,6 +512,75 @@ def adjoint(self):
         return self._adjoint


+def proj_space_weighting(reco_space, geometry):
+    """Determine a weighting for the range of a ray transform.
+
+    If ``reco_space`` is weighted with the cell sides, and if ``geometry``
+    represents an acquisition with a continuous curve or surface of motion,
+    then the projection space will be weighted per axis: in the angle
+    axis (or axes), non-uniform sampling is allowed and taken into
+    account, while in the detector axis (or axes), uniform sampling is
+    mandatory.
+    For a 1D angle partition, the weights are given by ::
+
+        angle_weights[1:-1] = (angles[2:] - angles[:-2]) / 2
+
+    in the inner part and
+
+        angle_weights[0] = angles[1] - angles[0],
+        angle_weights[-1] = angles[-1] - angles[-2]
+
+    at the boundaries. This corresponds to the summed trapezoidal rule
+    for non-uniform nodes.
+
+    Parameters
+    ----------
+    reco_space : `DiscreteLp`
+        Reconstruction space, domain of the `RayTransform`.
+    geometry : `Geometry`
+        Acquisition geometry to be used to create the `RayTransform`.
+
+    Returns
+    -------
+    proj_space_weighting : `~odl.space.weighting.Weighting`
+        Weighting determined from the parameters. If ``reco_space.weighting``
+        is a `~odl.space.weighting.PerAxisWeighting` where the weight
+        factors correspond to the cell sides, the resulting weighting
+        is also a `PerAxisWeighting`, with factors
+        ``(angle_weights,) + tuple(geometry.det_partition.cell_sides)``.
+
+        Otherwise, no weighting will be applied, and ``None`` is returned.
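The angle weights can be checked directly against the formulas above; a
standalone sketch with assumed non-uniform sample angles::

    import numpy as np

    angles = np.array([0.0, 0.1, 0.3, 0.6, 1.0])
    w = np.empty_like(angles)
    w[1:-1] = (angles[2:] - angles[:-2]) / 2
    w[0] = angles[1] - angles[0]
    w[-1] = angles[-1] - angles[-2]
    print(w)  # [ 0.1   0.15  0.25  0.35  0.4 ]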
+ """ + if (isinstance(reco_space.weighting, PerAxisWeighting) and + reco_space.weighting.array_axes == () and + np.allclose(reco_space.weighting.consts, reco_space.cell_sides) and + hasattr(geometry, 'angles')): + + angle_vecs = geometry.motion_partition.coord_vectors + angle_cell_vecs = geometry.motion_partition.cell_sizes_vecs + angles_uniform = geometry.motion_partition.is_uniform_byaxis + weights = [] + + for i in range(len(angle_vecs)): + if angles_uniform[i]: + weights.append(angle_cell_vecs[i][0]) + else: + weight = np.empty_like(angle_vecs[i]) + weight[1:-1] = (angle_vecs[i][2:] - angle_vecs[i][:-2]) / 2 + weight[0] = angle_cell_vecs[i][0] + weight[-1] = angle_cell_vecs[i][-1] + + weights.append(weight) + + weights.extend(geometry.det_partition.cell_sides) + + # Use impl-specific weighting class + return type(reco_space.weighting)(weights) + + else: + return None + + if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/util/utility.py b/odl/tomo/util/utility.py index d1fb21406de..db0e3d2c99e 100644 --- a/odl/tomo/util/utility.py +++ b/odl/tomo/util/utility.py @@ -484,70 +484,6 @@ def transform_system(principal_vec, principal_default, other_vecs, return tuple(transformed_vecs) -def is_rotation_matrix(mat, show_diff=False): - from scipy.linalg import det, norm - - dim = mat.shape[0] - if dim != mat.shape[1]: - return False - - determ = det(mat) - right_handed = (np.abs(determ - 1.) < 1E-10) - orthonorm_diff = mat * mat.T - np.eye(dim) - diff_norm = norm(orthonorm_diff, 2) - orthonormal = (diff_norm < 1E-10) - if not right_handed or not orthonormal: - if show_diff: - print('matrix S:\n', mat) - print('det(S): ', determ) - print('S*S.T - eye:\n', orthonorm_diff) - print('2-norm of difference: ', diff_norm) - return False - return True - - -def angles_from_matrix(rot_matrix): - if rot_matrix.shape == (2, 2): - theta = np.atan2(rot_matrix[1, 0], rot_matrix[0, 0]) - return theta, - elif rot_matrix.shape == (3, 3): - if rot_matrix[2, 2] == 1.: # cannot use last row and column - theta = 0. - # upper-left block is 2d rotation for phi + psi, so one needs - # to be fixed - psi = 0. - phi = np.atan2(rot_matrix[1, 0], rot_matrix[0, 0]) - if phi < 0: - phi += 2 * np.pi # in [0, 2pi) - else: - phi = np.atan2(rot_matrix[0, 2], -rot_matrix[1, 2]) - psi = np.atan2(rot_matrix[2, 0], rot_matrix[2, 1]) - theta = np.acos(rot_matrix[2, 2]) - - if phi < 0. or psi < 0.: - phi += np.pi - psi += np.pi - theta = -theta - - return phi, theta, psi - - else: - raise ValueError('shape of `rot_matrix` must be (2, 2) or (3, 3), ' - 'got {}'.format(rot_matrix.shape)) - - -def to_lab_sys(vec_in_local_coords, local_sys): - vec_in_local_coords = np.array(vec_in_local_coords) - trafo_matrix = np.matrix(local_sys).T - return np.dot(trafo_matrix, vec_in_local_coords) - - -def to_local_sys(vec_in_lab_coords, local_sys): - vec_in_lab_coords = np.array(vec_in_lab_coords) - trafo_matrix = np.matrix(local_sys) - return np.dot(trafo_matrix, vec_in_lab_coords) - - def perpendicular_vector(vec): """Return a vector perpendicular to ``vec``. 
diff --git a/odl/trafos/fourier.py b/odl/trafos/fourier.py
index e0b38d88d6f..31305be35d9 100644
--- a/odl/trafos/fourier.py
+++ b/odl/trafos/fourier.py
@@ -11,7 +11,7 @@
 from __future__ import print_function, division, absolute_import
 import numpy as np

-from odl.discr import DiscreteLp, discr_sequence_space
+from odl.discr import DiscreteLp, uniform_discr
 from odl.operator import Operator
 from odl.set import RealNumbers, ComplexNumbers
 from odl.trafos.backends.pyfftw_bindings import (
@@ -59,8 +59,8 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-',
         range : `DiscreteLp`, optional
             Range of the Fourier transform. If not given, the range
             is determined from ``domain`` and the other parameters as
-            a `discr_sequence_space` with exponent ``p / (p - 1)``
-            (read as 'inf' for p=1 and 1 for p='inf').
+            a `DiscreteLp` space with exponent ``p / (p - 1)``
+            (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells.
         axes : int or sequence of ints, optional
             Dimensions in which a transform is to be calculated. ``None``
             means all axes.
@@ -123,9 +123,9 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-',

         if range is None:
             impl = domain.tspace.impl
-            range = discr_sequence_space(
-                ran_shape, ran_dtype, impl,
-                exponent=conj_exponent(domain.exponent))
+            range = uniform_discr(
+                [0] * len(ran_shape), ran_shape, ran_shape, impl=impl,
+                dtype=ran_dtype, exponent=conj_exponent(domain.exponent))
         else:
             if range.shape != ran_shape:
                 raise ValueError('expected range shape {}, got {}.'
@@ -391,8 +391,8 @@ def __init__(self, domain, range=None, axes=None, sign='-',
         range : `DiscreteLp`, optional
             Range of the Fourier transform. If not given, the range
             is determined from ``domain`` and the other parameters as
-            a `discr_sequence_space` with exponent ``p / (p - 1)``
-            (read as 'inf' for p=1 and 1 for p='inf').
+            a `DiscreteLp` space with exponent ``p / (p - 1)``
+            (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells.
         axes : int or sequence of ints, optional
             Dimensions in which a transform is to be calculated. ``None``
             means all axes.
@@ -416,7 +416,7 @@ def __init__(self, domain, range=None, axes=None, sign='-',
         Complex-to-complex (default) transforms have the same grids in
         domain and range:

-        >>> domain = discr_sequence_space((2, 4))
+        >>> domain = odl.uniform_discr([0, 0], [1, 3], (2, 4))
         >>> fft = DiscreteFourierTransform(domain)
         >>> fft.domain.shape
         (2, 4)
@@ -426,7 +426,8 @@ def __init__(self, domain, range=None, axes=None, sign='-',
         Real-to-complex transforms have a range grid with shape
         ``n // 2 + 1`` in the last tranform axis:

-        >>> domain = discr_sequence_space((2, 3, 4), dtype='float')
+        >>> domain = odl.uniform_discr([0, 0, 0], [1, 2, 3], (2, 3, 4),
+        ...                            dtype='float')
         >>> axes = (0, 1)
         >>> fft = DiscreteFourierTransform(
         ...     domain, halfcomplex=True, axes=axes)
@@ -542,8 +543,8 @@ def __init__(self, range, domain=None, axes=None, sign='+',
         domain : `DiscreteLp`, optional
             Domain of the inverse Fourier transform. If not given, the
             domain is determined from ``range`` and the other parameters
-            as a `discr_sequence_space` with exponent ``p / (p - 1)``
-            (read as 'inf' for p=1 and 1 for p='inf').
+            as a `DiscreteLp` space with exponent ``p / (p - 1)``
+            (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells.
         axes : sequence of ints, optional
             Dimensions in which a transform is to be calculated. `None`
             means all axes.
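The conjugate-exponent rule referenced in these docstrings, written out as a
standalone sketch (the patch itself uses the existing ``conj_exponent``
helper)::

    def conj_exponent(p):
        """Return the conjugate exponent q with 1/p + 1/q = 1."""
        if p == 1.0:
            return float('inf')
        elif p == float('inf'):
            return 1.0
        else:
            return p / (p - 1.0)

    assert conj_exponent(2.0) == 2.0
    assert conj_exponent(1.0) == float('inf')
    assert conj_exponent(float('inf')) == 1.0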
@@ -567,7 +568,7 @@ def __init__(self, range, domain=None, axes=None, sign='+',
         Complex-to-complex (default) transforms have the same grids in
         domain and range:
 
-        >>> range_ = discr_sequence_space((2, 4))
+        >>> range_ = odl.uniform_discr([0, 0], [1, 3], (2, 4))
         >>> ifft = DiscreteFourierTransformInverse(range_)
         >>> ifft.domain.shape
         (2, 4)
@@ -577,7 +578,8 @@ def __init__(self, range, domain=None, axes=None, sign='+',
         Complex-to-real transforms have a domain grid with shape
         ``n // 2 + 1`` in the last transform axis:
 
-        >>> range_ = discr_sequence_space((2, 3, 4), dtype='float')
+        >>> range_ = odl.uniform_discr([0, 0, 0], [1, 2, 3], (2, 3, 4),
+        ...                            dtype='float')
         >>> axes = (0, 1)
         >>> ifft = DiscreteFourierTransformInverse(
         ...     range_, halfcomplex=True, axes=axes)
diff --git a/odl/util/normalize.py b/odl/util/normalize.py
index daf09e48130..691f0e38b7e 100644
--- a/odl/util/normalize.py
+++ b/odl/util/normalize.py
@@ -9,6 +9,7 @@
 """Utilities for normalization of user input."""
 
 from __future__ import print_function, division, absolute_import
+from numbers import Integral
 
 import numpy as np
 
@@ -175,17 +176,20 @@ def normalized_index_expression(indices, shape, int_to_slice=False):
     Returns
     -------
     normalized : tuple of ints or `slice`'s
-        Normalized index expression
+        Normalized index expression.
 
     Examples
     --------
-    Sequences are turned into tuples. We can have at most as many entries
-    as the length of ``shape``, but fewer are allowed - the remaining
+    Sequences are turned into tuples, except for lists of integers, which
+    are taken as indices for the first axis. We can have at most as many
+    entries as the length of ``shape``, but fewer are allowed - the remaining
     list places are filled up by ``slice(None)``:
 
     >>> normalized_index_expression([1, 2, 3], shape=(3, 4, 5))
+    ([1, 2, 3], slice(None, None, None), slice(None, None, None))
+    >>> normalized_index_expression((1, 2, 3), shape=(3, 4, 5))
     (1, 2, 3)
-    >>> normalized_index_expression([1, 2], shape=(3, 4, 5))
+    >>> normalized_index_expression((1, 2), shape=(3, 4, 5))
     (1, 2, slice(None, None, None))
     >>> normalized_index_expression([slice(2), 2], shape=(3, 4, 5))
     (slice(None, 2, None), 2, slice(None, None, None))
@@ -199,14 +203,14 @@ def normalized_index_expression(indices, shape, int_to_slice=False):
     axes:
 
     >>> x = np.zeros(shape=(3, 4, 5))
-    >>> idx1 = normalized_index_expression([1, 2, 3], shape=(3, 4, 5),
+    >>> idx1 = normalized_index_expression((1, 2, 3), shape=(3, 4, 5),
    ...                                    int_to_slice=True)
     >>> idx1
     (slice(1, 2, None), slice(2, 3, None), slice(3, 4, None))
     >>> x[idx1]
     array([[[ 0.]]])
-    >>> idx2 = normalized_index_expression([1, 2, 3], shape=(3, 4, 5),
-    ...                                    int_to_slice=False)
+    >>> idx2 = normalized_index_expression((1, 2, 3), shape=(3, 4, 5),
+    ...                                    int_to_slice=False)
     >>> idx2
     (1, 2, 3)
     >>> x[idx2]
@@ -215,46 +219,71 @@
     ndim = len(shape)
     # Support indexing with fewer indices as indexing along the first
     # corresponding axes. In the other cases, normalize the input.
-    if np.isscalar(indices):
+    if indices is None:
+        indices = [None, Ellipsis]
+    elif np.isscalar(indices):
         indices = [indices, Ellipsis]
-    elif (isinstance(indices, slice) or indices is Ellipsis):
+    elif (isinstance(indices, slice) or
+          indices is Ellipsis or
+          (isinstance(indices, list) and
+           all(isinstance(idx, Integral) for idx in indices))):
         indices = [indices]
     indices = list(indices)
 
     if len(indices) < ndim and Ellipsis not in indices:
         indices.append(Ellipsis)
 
-    # Turn Ellipsis into the correct number of slice(None)
-    if Ellipsis in indices:
-        if indices.count(Ellipsis) > 1:
-            raise ValueError('cannot use more than one Ellipsis.')
+    # Turn Ellipsis into the correct number of slice(None). We need a
+    # slightly more elaborate loop since `==` comparisons with arrays
+    # must be avoided, i.e., we cannot use `indices.count()` or
+    # `... in indices`.
+    ell_idx = None
+    for i, idx in enumerate(indices):
+        if idx is Ellipsis:
+            if ell_idx is not None:
+                raise ValueError('cannot use more than one Ellipsis.')
+            else:
+                ell_idx = i
+
+    # Count the number of new axes; they should not count towards the
+    # ellipsis axes.
+    num_newaxes = 0
+    for idx in indices:
+        if idx is None:
+            num_newaxes += 1
 
-        eidx = indices.index(Ellipsis)
-        extra_dims = ndim - len(indices) + 1
-        indices = (indices[:eidx] + [slice(None)] * extra_dims +
-                   indices[eidx + 1:])
+    if ell_idx is not None:
+        extra_dims = ndim - (len(indices) - num_newaxes) + 1
+        indices = (indices[:ell_idx] + [slice(None)] * extra_dims +
+                   indices[ell_idx + 1:])
 
     # Turn single indices into length-1 slices if desired
-    for (i, idx), n in zip(enumerate(indices), shape):
+    dim = 0
+    for i in range(len(indices)):
+        if indices[i] is None:
+            continue
+
+        idx = indices[i]
+        n = shape[dim]
         if np.isscalar(idx):
             if idx < 0:
                 idx += n
             if idx >= n:
-                raise IndexError('Index {} is out of bounds for axis '
+                raise IndexError('index {} is out of bounds for axis '
                                  '{} with size {}.'
                                  ''.format(idx, i, n))
             if int_to_slice:
                 indices[i] = slice(idx, idx + 1)
+        dim += 1
+
     # Catch most common errors
     if any(s.start == s.stop and s.start is not None or
            s.start == n for s, n in zip(indices, shape)
            if isinstance(s, slice)):
-        raise ValueError('Slices with empty axes not allowed.')
-    if None in indices:
-        raise ValueError('creating new axes is not supported.')
-    if len(indices) > ndim:
+        raise ValueError('slices with empty axes not allowed.')
+    if len(indices) - num_newaxes > ndim:
         raise IndexError('too many indices: {} > {}.'
''.format(len(indices), ndim)) diff --git a/odl/util/numerics.py b/odl/util/numerics.py index 9366b02625f..4b84dc84d0b 100644 --- a/odl/util/numerics.py +++ b/odl/util/numerics.py @@ -9,13 +9,15 @@ """Numerical helper functions for convenience or speed.""" from __future__ import print_function, division, absolute_import +from numbers import Integral import numpy as np -from odl.util.normalize import normalized_scalar_param_list, safe_int_conv +from odl.util.normalize import ( + normalized_scalar_param_list, safe_int_conv, normalized_index_expression) __all__ = ('apply_on_boundary', 'fast_1d_tensor_mult', 'resize_array', - 'zscore') + 'zscore', 'simulate_slicing') _SUPPORTED_RESIZE_PAD_MODES = ('constant', 'symmetric', 'periodic', @@ -224,10 +226,10 @@ def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None): if out is None: out = np.array(ndarr, copy=True) else: - out[:] = ndarr # Self-assignment is free if out is ndarr + out[:] = ndarr # Self-assignment is free if `out is ndarr` if not onedim_arrs: - raise ValueError('no 1d arrays given') + return out if axes is None: axes = list(range(out.ndim - len(onedim_arrs), out.ndim)) @@ -836,6 +838,162 @@ def zscore(arr): return arr +def simulate_slicing(shape, indices): + """Simulate slicing into a Numpy array with given indices. + + This function is intended to simulate indexing of a Numpy array + without actually performing the indexing. Its typical usage is to + determine the shape of a result space after indexing. + + The function works with + + - integers, + - `slice` objects, + - index arrays, i.e., 1D array-like objects containing integers, + + and combinations of the above. It does not work with boolean arrays + or more advanced "fancy indexing". + + Parameters + ---------- + shape : sequence of ints + Number of entries per axis in the "simulated" array. + indices : index expression + Single instance or sequence of integer, `slice` and/or `array-like` + objects that should be used to simulate indexing of an array. + If index arrays are used, they must all be one-dimensional and have + the same length. + + Returns + ------- + sliced_shape : tuple of ints + The shape after simulated indexing. + collapsed_axes : tuple of ints + The axes that will be gone after indexing. + leftmost_index_array_axis : int or None + Index of the leftmost index array in the provided ``indices``, + or ``None`` if there is no index array. + + Raises + ------ + TypeError + If an invalid type of indexing object is used. This can be caught + by upstream code to switch to lazy indexing. 
+
+    Examples
+    --------
+    A single integer collapses the first axis (the value of the index
+    does not matter for the shape, as long as it lies within bounds):
+
+    >>> shape = (3, 4, 5, 6)
+    >>> simulate_slicing(shape, 0)  # arr[0]
+    ((4, 5, 6), (0,), None)
+
+    Multiple indices slice into corresponding axes from the left:
+
+    >>> simulate_slicing(shape, (0, 0))  # arr[0, 0]
+    ((5, 6), (0, 1), None)
+    >>> simulate_slicing(shape, (0, 1, 1))  # arr[0, 1, 1]
+    ((6,), (0, 1, 2), None)
+
+    Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep
+    one or several axes intact:
+
+    >>> # arr[0, :, 1, :]
+    >>> simulate_slicing(shape, (0, np.s_[:], 1, np.s_[:]))
+    ((4, 6), (0, 2), None)
+    >>> simulate_slicing(shape, (np.s_[...], 0, 0))  # arr[..., 0, 0]
+    ((3, 4), (2, 3), None)
+
+    With slices, parts of axes can be selected:
+
+    >>> # arr[0, :3, 1:4, ::2]
+    >>> simulate_slicing(shape, (0, np.s_[:3], np.s_[1:4], np.s_[::2]))
+    ((3, 3, 3), (0,), None)
+
+    Integer array-like objects (which must all have the same 1D shape)
+    are treated as follows: if their common length is ``n``, a new axis
+    of length ``n`` is created at the position of the leftmost index
+    array, and all index array axes are collapsed (note: this is
+    more useful for elements than for spaces):
+
+    >>> # arr[0, 0, [0, 1, 0, 2], :]
+    >>> simulate_slicing(shape, (0, 0, [0, 1, 0, 2], np.s_[:]))
+    ((4, 6), (0, 1), 2)
+    >>> # arr[:, [1, 1], [0, 2], :]
+    >>> simulate_slicing(shape, (np.s_[:], [1, 1], [0, 2], np.s_[:]))
+    ((3, 2, 6), (2,), 1)
+    >>> # arr[[2, 0], [3, 3], [0, 1], [5, 2]]
+    >>> simulate_slicing(shape, ([2, 0], [3, 3], [0, 1], [5, 2]))
+    ((2,), (1, 2, 3), 0)
+    """
+    if getattr(indices, 'dtype', object) == bool:
+        # Raising here as a signal for upstream code.
+        # This is an optimization that avoids counting the number of
+        # `True` entries here *and* in the actual indexing operation.
+        raise TypeError('cannot index space with a boolean element or '
+                        'array')
+
+    indices = normalized_index_expression(indices, shape)
+    ndim = len(shape)
+
+    # Go through the indices and process the index arrays, checking if
+    # they are all one-dimensional and have the same length.
+    # Note: this will not catch nested lists or tuples, since doing such
+    # a check could be expensive.
+    leftmost_index_array_index = None
+    index_array_len = None
+    for i in range(ndim):
+        if (isinstance(indices[i], (tuple, list)) or
+                hasattr(indices[i], 'ndim')):
+            if getattr(indices[i], 'ndim', 1) != 1:
+                # ValueError will typically not be caught upstream (in
+                # contrast to TypeError), indicating a true error
+                raise ValueError('index arrays must be 1-dimensional, '
+                                 'got {}-dim. array in axis {}'
+                                 ''.format(indices[i].ndim, i))
+
+            if leftmost_index_array_index is None:
+                leftmost_index_array_index = i
+            if index_array_len is None:
+                index_array_len = len(indices[i])
+            elif len(indices[i]) != index_array_len:
+                raise ValueError('index arrays must all have the same '
+                                 'length, got lengths {} and {}'
+                                 ''.format(index_array_len,
+                                           len(indices[i])))
+
+    new_shape = list(shape)  # a copy
+
+    # Replace shape entries by -1 to signal collapsing, or by the true
+    # length after indexing
+    for i in range(len(indices)):
+        if isinstance(indices[i], Integral):
+            # Collapse
+            new_shape[i] = -1
+        elif isinstance(indices[i], slice):
+            # Length of the axis after applying the slice
+            start, stop, step = indices[i].indices(shape[i])
+            new_shape[i] = int(np.ceil((stop - start) / step))
+        elif (isinstance(indices[i], (tuple, list)) or
+              hasattr(indices[i], 'ndim')):
+            # Add index array length at leftmost position, collapse
+            # otherwise
+            if i == leftmost_index_array_index:
+                new_shape[i] = index_array_len
+            else:
+                new_shape[i] = -1
+        else:
+            raise TypeError('got invalid element {!r} in `indices`'
+                            ''.format(indices[i]))
+
+    # Collect axes to collapse into a list and remove them from `new_shape`
+    collapsed_axes = [i for i in range(len(new_shape)) if new_shape[i] == -1]
+    new_shape = [n for n in new_shape if n != -1]
+
+    return tuple(new_shape), tuple(collapsed_axes), leftmost_index_array_index
+
+
 if __name__ == '__main__':
     from odl.util.testutils import run_doctests
     run_doctests()
diff --git a/odl/util/testutils.py b/odl/util/testutils.py
index 78f0f12a992..450626f0063 100644
--- a/odl/util/testutils.py
+++ b/odl/util/testutils.py
@@ -105,6 +105,10 @@ def all_equal(iter1, iter2):
     if iter1 is None and iter2 is None:
         return True
 
+    # Fast path for array-like objects
+    if hasattr(iter1, '__array__') and hasattr(iter2, '__array__'):
+        return np.array_equal(iter1, iter2)
+
     # If one nested iterator is exhausted, go to direct comparison
     try:
         it1 = iter(iter1)
@@ -132,12 +136,6 @@ def all_equal(iter1, iter2):
     return True
 
 
-def all_almost_equal_array(v1, v2, places):
-    return np.allclose(v1, v2,
-                       rtol=10 ** (-places), atol=10 ** (-places),
-                       equal_nan=True)
-
-
 def all_almost_equal(iter1, iter2, places=None):
     """Return ``True`` if all elements in ``a`` and ``b`` are almost equal."""
     try:
@@ -154,7 +152,9 @@ def all_almost_equal(iter1, iter2, places=None):
     # otherwise for recursive calls.
     if places is None:
         places = _places(iter1, iter2, None)
-    return all_almost_equal_array(iter1, iter2, places)
+    return np.allclose(iter1, iter2,
+                       rtol=10 ** (-places), atol=10 ** (-places),
+                       equal_nan=True)
 
     try:
         it1 = iter(iter1)
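The new fast path in `all_equal` hands array-like operands (anything exposing `__array__`) directly to `np.array_equal` instead of recursing element by element. A small equivalence sketch (illustrative only, not part of the test suite):

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    b = a.copy()
    # Fast path: one vectorized comparison
    fast = np.array_equal(a, b)
    # What the nested-iteration path would compute
    slow = all(all(x == y for x, y in zip(r1, r2)) for r1, r2 in zip(a, b))
    print(fast, slow)  # True True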