Skip to content

Commit

Permalink
API: remove dist_using_inner, closes #963
Browse files Browse the repository at this point in the history
  • Loading branch information
Holger Kohr committed Sep 25, 2017
1 parent 4c7a623 commit 60c0516
Show file tree
Hide file tree
Showing 4 changed files with 72 additions and 414 deletions.
133 changes: 20 additions & 113 deletions odl/space/npy_tensors.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,6 @@ def __init__(self, shape, dtype=None, order='A', **kwargs):
mathematical requirements.
By default, ``dist(x, y)`` is calculated as ``norm(x - y)``.
This creates an intermediate array ``x - y``, which can be
avoided by choosing ``dist_using_inner=True``.
This option cannot be combined with ``weight``,
``norm`` or ``inner``. It also cannot be used in case of
Expand All @@ -150,19 +148,6 @@ def __init__(self, shape, dtype=None, order='A', **kwargs):
``dist`` or ``norm``. It also cannot be used in case of
non-numeric ``dtype``.
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
This option can only be used if ``exponent`` is 2.0.
Default: False.
kwargs :
Further keyword arguments are passed to the weighting
classes.
Expand Down Expand Up @@ -234,7 +219,6 @@ def __init__(self, shape, dtype=None, order='A', **kwargs):
inner = kwargs.pop('inner', None)
weighting = kwargs.pop('weighting', None)
exponent = kwargs.pop('exponent', getattr(weighting, 'exponent', 2.0))
dist_using_inner = bool(kwargs.pop('dist_using_inner', False))

if (not is_numeric_dtype(self.dtype) and
any(x is not None for x in (dist, norm, inner, weighting))):
Expand All @@ -259,15 +243,9 @@ def __init__(self, shape, dtype=None, order='A', **kwargs):
raise ValueError('`weighting.exponent` conflicts with '
'`exponent`: {} != {}'
''.format(weighting.exponent, exponent))
if weighting.dist_using_inner != dist_using_inner:
raise ValueError('`weighting.dist_using_inner` conflicts '
'with `dist_using_inner`: {} != {}'
''.format(weighting.dist_using_inner,
dist_using_inner))
self.__weighting = weighting
else:
self.__weighting = _weighting(
weighting, exponent, dist_using_inner=dist_using_inner)
self.__weighting = _weighting(weighting, exponent)

# Check (afterwards) that the weighting input was sane
if isinstance(self.weighting, NumpyTensorSpaceArrayWeighting):
Expand All @@ -293,8 +271,7 @@ def __init__(self, shape, dtype=None, order='A', **kwargs):
elif inner is not None:
self.__weighting = NumpyTensorSpaceCustomInner(inner)
else:
self.__weighting = NumpyTensorSpaceNoWeighting(
exponent, dist_using_inner=dist_using_inner)
self.__weighting = NumpyTensorSpaceNoWeighting(exponent)

@property
def impl(self):
Expand Down Expand Up @@ -1529,18 +1506,15 @@ def fallback_copy(x1, x2, n):
out.data[:] = out_arr.reshape(out.shape, order=ravel_order)


def _weighting(weights, exponent):
    """Return a weighting whose type is inferred from the arguments.

    Parameters
    ----------
    weights : None, scalar or `array-like`
        ``None`` selects no weighting, a scalar a constant weighting,
        anything else is converted to an array weighting.
    exponent : positive float
        Exponent of the norm implemented by the weighting.

    Returns
    -------
    weighting : `Weighting`
        A ``NumpyTensorSpace*Weighting`` instance matching ``weights``.
    """
    if weights is None:
        return NumpyTensorSpaceNoWeighting(exponent)
    if np.isscalar(weights):
        return NumpyTensorSpaceConstWeighting(weights, exponent)
    # Last possibility: interpret ``weights`` as an array.
    return NumpyTensorSpaceArrayWeighting(np.asarray(weights), exponent)


Expand Down Expand Up @@ -1594,7 +1568,7 @@ def npy_weighted_norm(weights, exponent=2.0):
return _weighting(weights, exponent=exponent).norm


def npy_weighted_dist(weights, exponent=2.0):
    """Weighted distance on `TensorSpace`'s as free function.

    Parameters
    ----------
    weights : scalar or `array-like`
        Weight of the distance. A scalar is interpreted as a
        constant weight, a 1-dim. array as a weighting vector.
    exponent : positive `float`
        Exponent of the norm.

    Returns
    -------
    dist : callable
        The weighted distance function, taking two tensors and
        returning a float.

    See Also
    --------
    NumpyTensorSpaceConstWeighting
    NumpyTensorSpaceArrayWeighting
    """
    weighting = _weighting(weights, exponent=exponent)
    return weighting.dist


def _norm_default(x):
Expand Down Expand Up @@ -1694,7 +1657,7 @@ class NumpyTensorSpaceArrayWeighting(ArrayWeighting):
See ``Notes`` for mathematical details.
"""

def __init__(self, array, exponent=2.0):
    """Initialize a new instance.

    Parameters
    ----------
    array : `array-like`
        Weighting array of this instance.
    exponent : positive `float`, optional
        Exponent of the norm. For values other than 2.0, no inner
        product is defined.

    Notes
    -----
    Validity of the weighting array (e.g. positivity of its entries)
    is not checked during initialization.
    """
    # Delegate storage of ``array`` and ``exponent`` to the base class.
    super(NumpyTensorSpaceArrayWeighting, self).__init__(
        array, impl='numpy', exponent=exponent)

def __hash__(self):
    """Return ``hash(self)``."""
    # Hash on the raw bytes of the weighting array so that equal
    # arrays (same contents) hash equally, together with the type
    # and the exponent.
    key = (type(self), self.array.tobytes(), self.exponent)
    return hash(key)

def inner(self, x1, x2):
"""Return the weighted inner product of ``x1`` and ``x2``.
Expand Down Expand Up @@ -1823,7 +1774,7 @@ class NumpyTensorSpaceConstWeighting(ConstWeighting):
See ``Notes`` for mathematical details.
"""

def __init__(self, const, exponent=2.0):
    """Initialize a new instance.

    Parameters
    ----------
    const : positive float
        Weighting constant of the inner product, norm and distance.
    exponent : positive float, optional
        Exponent of the norm. For values other than 2.0, the inner
        product is not defined.
    """
    # The base class stores ``const`` and ``exponent``; this class
    # only fixes ``impl`` to 'numpy'.
    super(NumpyTensorSpaceConstWeighting, self).__init__(
        const, impl='numpy', exponent=exponent)

def inner(self, x1, x2):
"""Return the weighted inner product of ``x1`` and ``x2``.
Expand Down Expand Up @@ -1942,13 +1882,7 @@ def dist(self, x1, x2):
dist : float
The distance between the tensors.
"""
if self.dist_using_inner:
dist_squared = (_norm_default(x1) ** 2 + _norm_default(x2) ** 2 -
2 * _inner_default(x1, x2).real)
if dist_squared < 0.0: # Compensate for numerical error
dist_squared = 0.0
return float(np.sqrt(self.const * dist_squared))
elif self.exponent == 2.0:
if self.exponent == 2.0:
return float(np.sqrt(self.const) * _norm_default(x1 - x2))
elif self.exponent == float('inf'):
return float(self.const * _pnorm_default(x1 - x2, self.exponent))
def __new__(cls, *args, **kwargs):
    """Implement singleton pattern if ``exp==2.0``.

    The exponent may be given either as the first positional
    argument or as the ``exponent`` keyword argument; any further
    arguments are forwarded to the base ``__new__``.
    """
    # NOTE: after the removal of ``dist_using_inner``, the former
    # ``len(args) == 1`` and ``len(args) > 1`` cases were identical,
    # so they are collapsed into a single branch here.
    if args:
        exponent = args[0]
        args = args[1:]
    else:
        exponent = kwargs.pop('exponent', 2.0)

    if exponent == 2.0:
        # Default exponent: reuse one shared (singleton) instance.
        if not cls.__instance:
            cls.__instance = super().__new__(cls, *args, **kwargs)
        return cls.__instance
    else:
        return super().__new__(cls, *args, **kwargs)

def __init__(self, exponent=2.0):
    """Initialize a new instance.

    Parameters
    ----------
    exponent : positive `float`, optional
        Exponent of the norm. The inner product is only defined for
        exponent 2.0.
    """
    # No weighting constant/array is stored; only ``impl`` and
    # ``exponent`` are passed on to the base class.
    super(NumpyTensorSpaceNoWeighting, self).__init__(
        impl='numpy', exponent=exponent)


class NumpyTensorSpaceCustomInner(CustomInner):

"""Class for handling a user-specified inner product."""

def __init__(self, inner):
    """Initialize a new instance.

    Parameters
    ----------
    inner : callable
        Inner product implementation. It must satisfy the usual
        inner product axioms:

        - ``<x, y> = conj(<y, x>)``
        - ``<s*x + y, z> = s * <x, z> + <y, z>``
        - ``<x, x> = 0`` if and only if ``x = 0``
    """
    # Only ``impl`` is fixed here; ``inner`` is handled by the
    # ``CustomInner`` base class.
    super(NumpyTensorSpaceCustomInner, self).__init__(inner, impl='numpy')


class NumpyTensorSpaceCustomNorm(CustomNorm):
Expand Down
Loading

0 comments on commit 60c0516

Please sign in to comment.