
Silence some warnings. #2328

Merged: 16 commits, Sep 4, 2018
7 changes: 5 additions & 2 deletions xarray/coding/times.py
@@ -183,8 +183,11 @@ def decode_cf_datetime(num_dates, units, calendar=None,
# fixes: https://github.com/pydata/pandas/issues/14068
# these lines check if the lowest or the highest value in dates
# causes an OutOfBoundsDatetime (Overflow) error
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'invalid value encountered',
RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date

# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
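
As an aside, a runnable sketch of the overflow probe this hunk wraps (illustrative values; 'D' stands in for the decoded delta unit):

import numpy as np
import pandas as pd

ref_date = pd.Timestamp('2000-01-01')
flat_num_dates = np.array([0.0, 365.25])  # offsets in days

# If either extreme overflows the datetime64[ns] range, these raise
# OutOfBoundsDatetime, which the caller catches in order to fall back
# to a slower decoding path; here both succeed quietly.
pd.to_timedelta(flat_num_dates.min(), 'D') + ref_date
pd.to_timedelta(flat_num_dates.max(), 'D') + ref_date
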
2 changes: 1 addition & 1 deletion xarray/core/formatting.py
@@ -183,7 +183,7 @@ def format_items(x):
day_part = (x[~pd.isnull(x)]
.astype('timedelta64[D]')
.astype('timedelta64[ns]'))
time_needed = x != day_part
time_needed = x[~pd.isnull(x)] != day_part
day_needed = day_part != np.timedelta64(0, 'ns')
if np.logical_not(day_needed).all():
timedelta_format = 'time'
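
For context on the hunk above, a small sketch (illustrative timedeltas) of why the mask is repeated: day_part is built from the non-null subset of x, so it can be shorter than x, and comparing it against the full array fails to broadcast once NaT values appear.

import pandas as pd

x = pd.to_timedelta(['1 days 02:00:00', '2 days', 'NaT']).values
day_part = (x[~pd.isnull(x)]
            .astype('timedelta64[D]')
            .astype('timedelta64[ns]'))
# x has 3 elements but day_part only 2, so `x != day_part` cannot
# broadcast; the fixed line masks x the same way before comparing
time_needed = x[~pd.isnull(x)] != day_part
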
19 changes: 12 additions & 7 deletions xarray/core/missing.py
@@ -3,6 +3,8 @@
from collections import Iterable
from functools import partial

import warnings

import numpy as np
import pandas as pd

@@ -207,13 +209,16 @@ def interp_na(self, dim=None, use_coordinate=True, method='linear', limit=None,
interp_class, kwargs = _get_interpolator(method, **kwargs)
interpolator = partial(func_interpolate_na, interp_class, **kwargs)

arr = apply_ufunc(interpolator, index, self,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim]],
output_dtypes=[self.dtype],
dask='parallelized',
vectorize=True,
keep_attrs=True).transpose(*self.dims)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'overflow', RuntimeWarning)
warnings.filterwarnings('ignore', 'invalid value', RuntimeWarning)
arr = apply_ufunc(interpolator, index, self,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim]],
output_dtypes=[self.dtype],
dask='parallelized',
vectorize=True,
keep_attrs=True).transpose(*self.dims)

if limit is not None:
arr = arr.where(valids)
7 changes: 6 additions & 1 deletion xarray/plot/plot.py
@@ -479,9 +479,11 @@ def line(self, *args, **kwargs):

def _rescale_imshow_rgb(darray, vmin, vmax, robust):
assert robust or vmin is not None or vmax is not None
# TODO: remove when min numpy version is bumped to 1.13
# There's a cyclic dependency via DataArray, so we can't import from
# xarray.ufuncs in global scope.
from xarray.ufuncs import maximum, minimum

# Calculate vmin and vmax automatically for `robust=True`
if robust:
if vmax is None:
@@ -507,7 +509,10 @@ def _rescale_imshow_rgb(darray, vmin, vmax, robust):
# After scaling, downcast to 32-bit float. This substantially reduces
# memory usage after we hand `darray` off to matplotlib.
darray = ((darray.astype('f8') - vmin) / (vmax - vmin)).astype('f4')
return minimum(maximum(darray, 0), 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'xarray.ufuncs',
PendingDeprecationWarning)
return minimum(maximum(darray, 0), 1)


def _plot2d(plotfunc):
4 changes: 3 additions & 1 deletion xarray/plot/utils.py
@@ -213,8 +213,10 @@ def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
# Handle discrete levels
if levels is not None:
if is_scalar(levels):
if user_minmax or levels == 1:
if user_minmax:
levels = np.linspace(vmin, vmax, levels)
elif levels == 1:
levels = np.asarray([(vmin + vmax) / 2])
else:
# N in MaxNLocator refers to bins, not ticks
ticker = mpl.ticker.MaxNLocator(levels - 1)
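
To see why levels == 1 needed its own branch: with a single level, np.linspace returns only vmin, whereas the new branch picks the midpoint. A quick illustration with made-up vmin/vmax:

import numpy as np

vmin, vmax = 0.0, 10.0
print(np.linspace(vmin, vmax, 1))       # [0.] -- the old branch's result
print(np.asarray([(vmin + vmax) / 2]))  # [5.] -- what the new branch yields
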
49 changes: 22 additions & 27 deletions xarray/tests/test_backends.py
@@ -1789,6 +1789,7 @@ def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore(tmp_file, 'w')

@pytest.mark.filterwarnings('ignore:complex dtypes are supported by h5py')
def test_complex(self):
expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))})
with self.roundtrip(expected) as actual:
@@ -2527,6 +2528,7 @@ class PyNioTestAutocloseTrue(PyNioTest):


@requires_pseudonetcdf
@pytest.mark.filterwarnings('ignore:IOAPI_ISPH is assumed to be 6370000')
Member commented: this is really nice! way better than using filterwarnings manually :)

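A minimal sketch of the two styles being contrasted (hypothetical test functions and warning text, not from this PR):

import warnings

import pytest

def noisy():
    warnings.warn('IOAPI_ISPH is assumed to be 6370000.', UserWarning)

def test_manual_filter():
    # older style: wrap each call site in catch_warnings
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', 'IOAPI_ISPH is assumed to be 6370000')
        noisy()

@pytest.mark.filterwarnings('ignore:IOAPI_ISPH is assumed to be 6370000')
def test_marker_filter():
    # the mark applies the same filter to the whole test (or class)
    noisy()
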
class PseudoNetCDFFormatTest(TestCase):
autoclose = True

@@ -2658,14 +2660,11 @@ def test_uamiv_format_read(self):
"""
Open a CAMx file and test data variables
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning,
message=('IOAPI_ISPH is assumed to be ' +
'6370000.; consistent with WRF'))
camxfile = open_example_dataset('example.uamiv',
engine='pseudonetcdf',
autoclose=True,
backend_kwargs={'format': 'uamiv'})

camxfile = open_example_dataset('example.uamiv',
engine='pseudonetcdf',
autoclose=True,
backend_kwargs={'format': 'uamiv'})
data = np.arange(20, dtype='f').reshape(1, 1, 4, 5)
expected = xr.Variable(('TSTEP', 'LAY', 'ROW', 'COL'), data,
dict(units='ppm', long_name='O3'.ljust(16),
@@ -2687,17 +2686,14 @@ def test_uamiv_format_mfread(self):
"""
Open a CAMx file and test data variables
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning,
message=('IOAPI_ISPH is assumed to be ' +
'6370000.; consistent with WRF'))
camxfile = open_example_mfdataset(
['example.uamiv',
'example.uamiv'],
engine='pseudonetcdf',
autoclose=True,
concat_dim='TSTEP',
backend_kwargs={'format': 'uamiv'})

camxfile = open_example_mfdataset(
['example.uamiv',
'example.uamiv'],
engine='pseudonetcdf',
autoclose=True,
concat_dim='TSTEP',
backend_kwargs={'format': 'uamiv'})

data1 = np.arange(20, dtype='f').reshape(1, 1, 4, 5)
data = np.concatenate([data1] * 2, axis=0)
@@ -2720,19 +2716,18 @@

def test_uamiv_format_write(self):
fmtkw = {'format': 'uamiv'}
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning,
message=('IOAPI_ISPH is assumed to be ' +
'6370000.; consistent with WRF'))
expected = open_example_dataset('example.uamiv',
engine='pseudonetcdf',
autoclose=False,
backend_kwargs=fmtkw)

expected = open_example_dataset('example.uamiv',
engine='pseudonetcdf',
autoclose=False,
backend_kwargs=fmtkw)
with self.roundtrip(expected,
save_kwargs=fmtkw,
open_kwargs={'backend_kwargs': fmtkw}) as actual:
assert_identical(expected, actual)

expected.close()

def save(self, dataset, path, **save_kwargs):
import PseudoNetCDF as pnc
pncf = pnc.PseudoNetCDFFile()
3 changes: 2 additions & 1 deletion xarray/tests/test_coding_times.py
@@ -538,7 +538,8 @@ def test_cf_datetime_nan(num_dates, units, expected_list):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN')
actual = coding.times.decode_cf_datetime(num_dates, units)
expected = np.array(expected_list, dtype='datetime64[ns]')
# use pandas because numpy will deprecate timezone-aware conversions
expected = pd.to_datetime(expected_list)
assert_array_equal(expected, actual)


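A tiny illustration of the swap above (illustrative strings; the real values come from the test's parametrization):

import numpy as np
import pandas as pd

expected_list = ['2000-01-01T00:00:00', 'NaT']
# both produce datetime64[ns] values here, but pandas also accepts the
# timezone-aware strings whose parsing NumPy has deprecated
via_numpy = np.array(expected_list, dtype='datetime64[ns]')
via_pandas = pd.to_datetime(expected_list)
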
11 changes: 9 additions & 2 deletions xarray/tests/test_dask.py
@@ -24,8 +24,11 @@

class DaskTestCase(TestCase):
def assertLazyAnd(self, expected, actual, test):
with dask.set_options(get=dask.get):

with (dask.config.set(get=dask.get) if hasattr(dask, 'config')
else dask.set_options(get=dask.get)):
Member commented: is this just a version check? Generally, I prefer to see a version comparison so we can more obviously clean these things up when older versions are no longer supported. (Same comment below)

Contributor Author replied: Ya it's basically a version check. I've made it an explicit version check now.

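A hedged sketch of the explicit version check the author settled on (helper name is hypothetical; dask 0.18 moved set_options to dask.config.set, matching the 2018-era API used in this diff):

from distutils.version import LooseVersion

import dask

# pick the context manager by version rather than by hasattr, so the
# fallback is easy to find and delete once old dask is dropped
if LooseVersion(dask.__version__) >= LooseVersion('0.18.0'):
    dask_config_set = dask.config.set
else:
    dask_config_set = dask.set_options

with dask_config_set(get=dask.get):
    pass  # run computations under the chosen scheduler
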
test(actual, expected)

if isinstance(actual, Dataset):
for k, v in actual.variables.items():
if k in actual.dims:
@@ -196,11 +199,13 @@ def test_missing_methods(self):
except NotImplementedError as err:
assert 'dask' in str(err)

@pytest.mark.filterwarnings('ignore::PendingDeprecationWarning')
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))

@pytest.mark.filterwarnings('ignore::PendingDeprecationWarning')
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
@@ -421,6 +426,7 @@ def duplicate_and_merge(array):
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndEqual(expected, actual)

@pytest.mark.filterwarnings('ignore::PendingDeprecationWarning')
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
@@ -821,7 +827,8 @@ def test_basic_compute():
dask.multiprocessing.get,
dask.local.get_sync,
None]:
with dask.set_options(get=get):
with (dask.config.set(get=get) if hasattr(dask, 'config')
else dask.set_options(get=get)):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
7 changes: 6 additions & 1 deletion xarray/tests/test_dataarray.py
@@ -672,6 +672,7 @@ def test_isel_types(self):
assert_identical(da.isel(x=np.array([0], dtype="int64")),
da.isel(x=np.array([0])))

@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_isel_fancy(self):
shape = (10, 7, 6)
np_array = np.random.random(shape)
@@ -845,6 +846,7 @@ def test_isel_drop(self):
selected = data.isel(x=0, drop=False)
assert_identical(expected, selected)

@pytest.mark.filterwarnings("ignore:Dataset.isel_points")
def test_isel_points(self):
shape = (10, 5, 6)
np_array = np.random.random(shape)
@@ -1237,6 +1239,7 @@ def test_reindex_like_no_index(self):
ValueError, 'different size for unlabeled'):
foo.reindex_like(bar)

@pytest.mark.filterwarnings('ignore:Indexer has dimensions')
def test_reindex_regressions(self):
# regression test for #279
expected = DataArray(np.random.randn(5), coords=[("time", range(5))])
@@ -1286,7 +1289,7 @@ def test_swap_dims(self):

def test_expand_dims_error(self):
array = DataArray(np.random.randn(3, 4), dims=['x', 'dim_0'],
coords={'x': np.linspace(0.0, 1.0, 3.0)},
coords={'x': np.linspace(0.0, 1.0, 3)},
attrs={'key': 'entry'})

with raises_regex(ValueError, 'dim should be str or'):
@@ -3529,6 +3532,8 @@ def test_rolling_reduce(da, center, min_periods, window, name):
@pytest.mark.parametrize('min_periods', (None, 1, 2, 3))
@pytest.mark.parametrize('window', (1, 2, 3, 4))
@pytest.mark.parametrize('name', ('sum', 'max'))
@pytest.mark.filterwarnings('ignore:Using a non-tuple sequence')
Contributor Author commented: Silences a warning that seems to be thrown by code in bottleneck:

xarray/tests/test_dataarray.py::test_rolling_reduce_nonnumeric[sum-1-3-False]
  /home/travis/miniconda/envs/test_env/lib/python3.6/site-packages/bottleneck/slow/move.py:149: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
    nidx1 = n[idx1]
  (the same FutureWarning is raised from bottleneck/slow/move.py lines 150, 152, and 153)

shoyer (Member) commented on Aug 20, 2018: Good catch -- can you file a report in bottleneck? I expect this would be easy to fix.

Contributor Author replied: OK. Filed a report upstream pydata/bottleneck#194 and reverted this commit.

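In miniature, the indexing pattern those warnings flag and the tuple form NumPy asks for (made-up array; the real offender lives in bottleneck's slow/move.py):

import numpy as np

arr = np.arange(12).reshape(3, 4)
seq = [slice(0, 2), slice(1, 3)]

# arr[seq] with a plain list is the deprecated form flagged above
# (newer NumPy rejects it outright); index with a tuple instead:
out = arr[tuple(seq)]
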
# root cause of the warning is bottleneck
def test_rolling_reduce_nonnumeric(center, min_periods, window, name):
da = DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7],
dims='time').isnull()
4 changes: 4 additions & 0 deletions xarray/tests/test_dataset.py
@@ -1240,6 +1240,7 @@ def test_isel_drop(self):
selected = data.isel(x=0, drop=False)
assert_identical(expected, selected)

@pytest.mark.filterwarnings("ignore:Dataset.isel_points")
def test_isel_points(self):
data = create_test_data()

@@ -1317,6 +1318,8 @@ def test_isel_points(self):
dim2=stations['dim2s'],
dim=np.array([4, 5, 6]))

@pytest.mark.filterwarnings("ignore:Dataset.sel_points")
@pytest.mark.filterwarnings("ignore:Dataset.isel_points")
def test_sel_points(self):
data = create_test_data()

@@ -1347,6 +1350,7 @@ def test_sel_points(self):
with pytest.raises(KeyError):
data.sel_points(x=[2.5], y=[2.0], method='pad', tolerance=1e-3)

@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_sel_fancy(self):
data = create_test_data()

16 changes: 8 additions & 8 deletions xarray/tests/test_missing.py
@@ -93,14 +93,14 @@ def test_interpolate_pd_compat():


@requires_scipy
def test_scipy_methods_function():
for method in ['barycentric', 'krog', 'pchip', 'spline', 'akima']:
kwargs = {}
# Note: Pandas does some wacky things with these methods and the full
# integration tests won't work.
da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True)
actual = da.interpolate_na(method=method, dim='time', **kwargs)
assert (da.count('time') <= actual.count('time')).all()
@pytest.mark.parametrize('method', ['barycentric', 'krog',
'pchip', 'spline', 'akima'])
def test_scipy_methods_function(method):
# Note: Pandas does some wacky things with these methods and the full
# integration tests won't work.
da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True)
actual = da.interpolate_na(method=method, dim='time')
assert (da.count('time') <= actual.count('time')).all()


@requires_scipy
10 changes: 10 additions & 0 deletions xarray/tests/test_plot.py
@@ -267,6 +267,7 @@ def test_datetime_dimension(self):
assert ax.has_data()

@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
@@ -328,6 +329,7 @@ def test_plot_size(self):
self.darray.plot(aspect=1)

@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
@@ -775,10 +777,13 @@ def test_plot_nans(self):
clim2 = self.plotfunc(x2).get_clim()
assert clim1 == clim2

@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.filterwarnings('ignore:invalid value encountered')
def test_can_plot_all_nans(self):
# regression test for issue #1780
self.plotfunc(DataArray(np.full((2, 2), np.nan)))

@pytest.mark.filterwarnings('ignore: Attempting to set')
def test_can_plot_axis_size_one(self):
if self.plotfunc.__name__ not in ('contour', 'contourf'):
self.plotfunc(DataArray(np.ones((1, 1))))
@@ -970,6 +975,7 @@ def test_2d_function_and_method_signature_same(self):
del func_sig['darray']
assert func_sig == method_sig

@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
@@ -1001,6 +1007,7 @@ def test_convenient_facetgrid(self):
else:
assert '' == ax.get_xlabel()

@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
@@ -1279,6 +1286,7 @@ def test_imshow_rgb_values_in_valid_range(self):
assert out.dtype == np.uint8
assert (out[..., :3] == da.values).all() # Compare without added alpha

@pytest.mark.filterwarnings('ignore:Several dimensions of this array')
def test_regression_rgb_imshow_dim_size_one(self):
# Regression: https://github.com/pydata/xarray/issues/1966
da = DataArray(easy_array((1, 3, 3), start=0.0, stop=1.0))
@@ -1511,6 +1519,7 @@ def test_facetgrid_polar(self):
sharey=False)


@pytest.mark.filterwarnings('ignore:tight_layout cannot')
class TestFacetGrid4d(PlotTestCase):
def setUp(self):
a = easy_array((10, 15, 3, 2))
@@ -1538,6 +1547,7 @@ def test_default_labels(self):
assert substring_in_axes(label, ax)


@pytest.mark.filterwarnings('ignore:tight_layout cannot')
class TestFacetedLinePlots(PlotTestCase):
def setUp(self):
self.darray = DataArray(np.random.randn(10, 6, 3, 4),