diff --git a/doc/indexing.rst b/doc/indexing.rst
index ace960689a8..e8482ac66b3 100644
--- a/doc/indexing.rst
+++ b/doc/indexing.rst
@@ -209,12 +209,16 @@ simultaneously, returning a new dataset:
 
 .. ipython:: python
 
-    da = xr.DataArray(np.random.rand(4, 3),
-                      [('time', pd.date_range('2000-01-01', periods=4)),
-                       ('space', ['IA', 'IL', 'IN'])])
-    ds = da.to_dataset(name='foo')
+    da = xr.DataArray(
+        np.random.rand(4, 3),
+        [
+            ("time", pd.date_range("2000-01-01", periods=4)),
+            ("space", ["IA", "IL", "IN"]),
+        ],
+    )
+    ds = da.to_dataset(name="foo")
     ds.isel(space=[0], time=[0])
-    ds.sel(time='2000-01-01')
+    ds.sel(time="2000-01-01")
 
 Positional indexing on a dataset is not supported because the ordering of
 dimensions in a dataset is somewhat ambiguous (it can vary between different
@@ -222,7 +226,6 @@ arrays). However, you can do normal indexing with dimension names:
 
 .. ipython:: python
 
-
     ds[dict(space=[0], time=[0])]
     ds.loc[dict(time='2000-01-01')]
 
@@ -248,7 +251,6 @@ Any variables with these dimensions are also dropped:
 
     ds.drop_dims('time')
 
-
 .. _masking with where:
 
 Masking with ``where``
@@ -326,8 +328,12 @@ MATLAB, or after using the :py:func:`numpy.ix_` helper:
 
 .. ipython:: python
 
-    da = xr.DataArray(np.arange(12).reshape((3, 4)), dims=['x', 'y'],
-                      coords={'x': [0, 1, 2], 'y': ['a', 'b', 'c', 'd']})
+
+    da = xr.DataArray(
+        np.arange(12).reshape((3, 4)),
+        dims=["x", "y"],
+        coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
+    )
     da
     da[[0, 1], [1, 1]]
 
@@ -410,43 +416,56 @@ can use indexing with ``.loc`` :
 
 .. ipython:: python
 
-    ds = xr.tutorial.open_dataset('air_temperature')
+    ds = xr.tutorial.open_dataset("air_temperature")
 
-    #add an empty 2D dataarray
-    ds['empty']= xr.full_like(ds.air.mean('time'),fill_value=0)
+    # add an empty 2D dataarray
+    ds["empty"] = xr.full_like(ds.air.mean("time"), fill_value=0)
 
-    #modify one grid point using loc()
-    ds['empty'].loc[dict(lon=260, lat=30)] = 100
+    # modify one grid point using loc()
+    ds["empty"].loc[dict(lon=260, lat=30)] = 100
 
-    #modify a 2D region using loc()
-    lc = ds.coords['lon']
-    la = ds.coords['lat']
-    ds['empty'].loc[dict(lon=lc[(lc>220)&(lc<260)], lat=la[(la>20)&(la<60)])] = 100
+    # modify a 2D region using loc()
+    lc = ds.coords["lon"]
+    la = ds.coords["lat"]
+    ds["empty"].loc[
+        dict(lon=lc[(lc > 220) & (lc < 260)], lat=la[(la > 20) & (la < 60)])
+    ] = 100
 
 or :py:meth:`~xarray.where`:
 
 .. ipython:: python
 
-    #modify one grid point using xr.where()
-    ds['empty'] = xr.where((ds.coords['lat']==20)&(ds.coords['lon']==260), 100, ds['empty'])
+    # modify one grid point using xr.where()
+    ds["empty"] = xr.where(
+        (ds.coords["lat"] == 20) & (ds.coords["lon"] == 260), 100, ds["empty"]
+    )
+
+    # or modify a 2D region using xr.where()
+    mask = (
+        (ds.coords["lat"] > 20)
+        & (ds.coords["lat"] < 60)
+        & (ds.coords["lon"] > 220)
+        & (ds.coords["lon"] < 260)
+    )
+    ds["empty"] = xr.where(mask, 100, ds["empty"])
 
-    #or modify a 2D region using xr.where()
-    mask = (ds.coords['lat']>20)&(ds.coords['lat']<60)&(ds.coords['lon']>220)&(ds.coords['lon']<260)
-    ds['empty'] = xr.where(mask, 100, ds['empty'])
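The two region-assignment idioms in the hunk above are easier to compare side by side. A minimal sketch, assuming only numpy and xarray, with a hypothetical zero-filled grid standing in for the tutorial dataset::

    import numpy as np
    import xarray as xr

    # hypothetical grid; stands in for the tutorial "air_temperature" dataset
    da = xr.DataArray(
        np.zeros((4, 5)),
        dims=["lat", "lon"],
        coords={"lat": [10, 20, 30, 40], "lon": [200, 220, 240, 260, 280]},
    )

    # label-based assignment to a rectangular region via .loc
    la, lc = da.coords["lat"], da.coords["lon"]
    da.loc[dict(lat=la[(la > 15) & (la < 45)], lon=lc[(lc > 210) & (lc < 270)])] = 100

    # the same region via a boolean mask; (lat) & (lon) broadcasts to 2D
    mask = (da.lat > 15) & (da.lat < 45) & (da.lon > 210) & (da.lon < 270)
    da = xr.where(mask, 100, da)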
 
 Vectorized indexing can also be used to assign values to xarray objects.
 
 .. ipython:: python
 
-    da = xr.DataArray(np.arange(12).reshape((3, 4)), dims=['x', 'y'],
-                      coords={'x': [0, 1, 2], 'y': ['a', 'b', 'c', 'd']})
+    da = xr.DataArray(
+        np.arange(12).reshape((3, 4)),
+        dims=["x", "y"],
+        coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
+    )
     da
     da[0] = -1  # assignment with broadcasting
     da
-    ind_x = xr.DataArray([0, 1], dims=['x'])
-    ind_y = xr.DataArray([0, 1], dims=['y'])
+    ind_x = xr.DataArray([0, 1], dims=["x"])
+    ind_y = xr.DataArray([0, 1], dims=["y"])
     da[ind_x, ind_y] = -2  # assign -2 to (ix, iy) = (0, 0) and (1, 1)
     da
@@ -508,10 +527,10 @@ flexible indexing. The following is an example of the pointwise indexing:
 
 .. ipython:: python
 
-    da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=['x', 'y'])
+    da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=["x", "y"])
     da
-    da.isel(x=xr.DataArray([0, 1, 6], dims='z'),
-            y=xr.DataArray([0, 1, 0], dims='z'))
+    da.isel(x=xr.DataArray([0, 1, 6], dims="z"), y=xr.DataArray([0, 1, 0], dims="z"))
+
 
 where three elements at ``(ix, iy) = ((0, 0), (1, 1), (6, 0))`` are selected
 and mapped along a new dimension ``z``.
@@ -521,23 +540,27 @@ you can supply a :py:class:`~xarray.DataArray` with a coordinate,
 
 .. ipython:: python
 
-    da.isel(x=xr.DataArray([0, 1, 6], dims='z',
-                           coords={'z': ['a', 'b', 'c']}),
-            y=xr.DataArray([0, 1, 0], dims='z'))
-
+    da.isel(
+        x=xr.DataArray([0, 1, 6], dims="z", coords={"z": ["a", "b", "c"]}),
+        y=xr.DataArray([0, 1, 0], dims="z"),
+    )
+
 Analogously, label-based pointwise-indexing is also possible by the ``.sel``
 method:
 
 .. ipython:: python
 
-    da = xr.DataArray(np.random.rand(4, 3),
-                      [('time', pd.date_range('2000-01-01', periods=4)),
-                       ('space', ['IA', 'IL', 'IN'])])
-    times = xr.DataArray(pd.to_datetime(['2000-01-03', '2000-01-02', '2000-01-01']),
-                         dims='new_time')
-    da.sel(space=xr.DataArray(['IA', 'IL', 'IN'], dims=['new_time']),
-           time=times)
-
+    da = xr.DataArray(
+        np.random.rand(4, 3),
+        [
+            ("time", pd.date_range("2000-01-01", periods=4)),
+            ("space", ["IA", "IL", "IN"]),
+        ],
+    )
+    times = xr.DataArray(
+        pd.to_datetime(["2000-01-03", "2000-01-02", "2000-01-01"]), dims="new_time"
+    )
+    da.sel(space=xr.DataArray(["IA", "IL", "IN"], dims=["new_time"]), time=times)
 
 .. _align and reindex:
@@ -635,12 +658,16 @@ through the :py:attr:`~xarray.DataArray.indexes` attribute.
 
 .. ipython:: python
 
-    da = xr.DataArray(np.random.rand(4, 3),
-                      [('time', pd.date_range('2000-01-01', periods=4)),
-                       ('space', ['IA', 'IL', 'IN'])])
+    da = xr.DataArray(
+        np.random.rand(4, 3),
+        [
+            ("time", pd.date_range("2000-01-01", periods=4)),
+            ("space", ["IA", "IL", "IN"]),
+        ],
+    )
     da
     da.indexes
-    da.indexes['time']
+    da.indexes["time"]
 
 Use :py:meth:`~xarray.DataArray.get_index` to get an index for a dimension,
 falling back to a default :py:class:`pandas.RangeIndex` if it has no coordinate
@@ -694,32 +721,31 @@ pandas:
 
 .. ipython:: python
 
-    midx = pd.MultiIndex.from_product([list('abc'), [0, 1]],
-                                      names=('one', 'two'))
-    mda = xr.DataArray(np.random.rand(6, 3),
-                       [('x', midx), ('y', range(3))])
-    mda
-    mda.sel(x=(list('ab'), [0]))
+
+    midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
+    mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))])
+    mda
+    mda.sel(x=(list("ab"), [0]))
 
 You can also select multiple elements by providing a list of labels or tuples or
 a slice of tuples:
 
 .. ipython:: python
 
-   mda.sel(x=[('a', 0), ('b', 1)])
+    mda.sel(x=[('a', 0), ('b', 1)])
 
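The block above demonstrates only the list-of-tuples form. A self-contained sketch of both forms named in the sentence, assuming numpy, pandas and xarray are importable::

    import numpy as np
    import pandas as pd
    import xarray as xr

    midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
    mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))])

    mda.sel(x=[("a", 0), ("b", 1)])       # list of tuples: exactly these labels
    mda.sel(x=slice(("a", 0), ("b", 1)))  # slice of tuples: the contiguous range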
 Additionally, xarray supports dictionaries:
 
 .. ipython:: python
 
-   mda.sel(x={'one': 'a', 'two': 0})
+    mda.sel(x={'one': 'a', 'two': 0})
 
 For convenience, ``sel`` also accepts multi-index levels directly
 as keyword arguments:
 
 .. ipython:: python
 
-   mda.sel(one='a', two=0)
+    mda.sel(one='a', two=0)
 
 Note that using ``sel`` it is not possible to mix a dimension
 indexer with level indexers for that dimension
@@ -731,7 +757,7 @@ multi-index is reduced to a single index.
 
 .. ipython:: python
 
-   mda.loc[{'one': 'a'}, ...]
+    mda.loc[{'one': 'a'}, ...]
 
 Unlike pandas, xarray does not guess whether you provide index levels or
 dimensions when using ``loc`` in some ambiguous cases. For example, for
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index c552c362021..a257c146c64 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -74,6 +74,8 @@ New Features
   deterministic hashing in previous releases; this change implements it when whole
-  xarray objects are embedded in a dask graph, e.g. when :py:meth:`DataArray.map_blocks` is
-  invoked. (:issue:`3378`, :pull:`3446`)
+  xarray objects are embedded in a dask graph, e.g. when :py:meth:`DataArray.map_blocks` is
+  invoked. (:issue:`3378`, :pull:`3446`, :pull:`3515`)
   By `Deepak Cherian <https://github.com/dcherian>`_ and
   `Guido Imperiale <https://github.com/crusaderky>`_.
 - xarray now respects the ``DataArray.encoding["coordinates"]`` attribute when writing to disk.
@@ -91,9 +93,14 @@ Bug fixes
   By `Deepak Cherian <https://github.com/dcherian>`_.
 - Sync with cftime by removing `dayofwk=-1` for cftime>=1.0.4.
   By `Anderson Banihirwe <https://github.com/andersy005>`_.
+- Rolling reduction operations no longer compute dask arrays by default. (:issue:`3161`).
+  In addition, the ``allow_lazy`` kwarg to ``reduce`` is deprecated.
+  By `Deepak Cherian <https://github.com/dcherian>`_.
 - Fix :py:meth:`xarray.core.groupby.DataArrayGroupBy.reduce` and
   :py:meth:`xarray.core.groupby.DatasetGroupBy.reduce` when reducing over multiple dimensions.
   (:issue:`3402`). By `Deepak Cherian <https://github.com/dcherian>`_
+- Allow appending datetime and bool data variables to zarr stores.
+  (:issue:`3480`). By `Akihiro Matsukawa <https://github.com/amatsukawa>`_.
 
 Documentation
 ~~~~~~~~~~~~~
@@ -220,6 +227,9 @@ Bug fixes
   By `Deepak Cherian <https://github.com/dcherian>`_.
 - Fix error in concatenating unlabeled dimensions (:pull:`3362`).
   By `Deepak Cherian <https://github.com/dcherian>`_.
+- Warn if the ``dim`` kwarg is passed to rolling operations. This is redundant since a dimension is
+  specified when the :py:class:`DatasetRolling` or :py:class:`DataArrayRolling` object is created.
+  (:pull:`3362`). By `Deepak Cherian <https://github.com/dcherian>`_.
 
 Documentation
 ~~~~~~~~~~~~~
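The zarr-append entry above pairs with the backends change that follows. A minimal sketch of the newly allowed append, assuming the zarr package is installed; "store.zarr" is a placeholder path::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {
            "when": ("time", np.array(["2019-01-01", "2019-01-02"], dtype="datetime64[ns]")),
            "flag": ("time", np.array([True, False])),
        }
    )
    ds.to_zarr("store.zarr", mode="w")  # "store.zarr" is a hypothetical local store

    extra = xr.Dataset(
        {
            "when": ("time", np.array(["2019-01-03"], dtype="datetime64[ns]")),
            "flag": ("time", np.array([True])),
        }
    )
    extra.to_zarr("store.zarr", mode="a", append_dim="time")  # previously raised ValueError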
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index d23594fc675..945b3937c43 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -1234,6 +1234,8 @@ def _validate_datatypes_for_zarr_append(dataset):
     def check_dtype(var):
         if (
             not np.issubdtype(var.dtype, np.number)
+            and not np.issubdtype(var.dtype, np.datetime64)
+            and not np.issubdtype(var.dtype, np.bool)
             and not coding.strings.is_unicode_dtype(var.dtype)
             and not var.dtype == object
         ):
@@ -1241,8 +1243,9 @@ def check_dtype(var):
             raise ValueError(
                 "Invalid dtype for data variable: {} "
                 "dtype must be a subtype of number, "
-                "a fixed sized string, a fixed size "
-                "unicode string or an object".format(var)
+                "datetime, bool, a fixed sized string, "
+                "a fixed size unicode string or an "
+                "object".format(var)
             )
 
     for k in dataset.data_vars.values():
diff --git a/xarray/core/common.py b/xarray/core/common.py
index d372115ea57..2afe4b4c3a7 100644
--- a/xarray/core/common.py
+++ b/xarray/core/common.py
@@ -43,14 +43,12 @@ def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool
     if include_skipna:
 
         def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):
-            return self.reduce(
-                func, dim, axis, skipna=skipna, allow_lazy=True, **kwargs
-            )
+            return self.reduce(func, dim, axis, skipna=skipna, **kwargs)
 
     else:
 
         def wrapped_func(self, dim=None, axis=None, **kwargs):  # type: ignore
-            return self.reduce(func, dim, axis, allow_lazy=True, **kwargs)
+            return self.reduce(func, dim, axis, **kwargs)
 
     return wrapped_func
 
@@ -83,20 +81,13 @@ def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool
 
         def wrapped_func(self, dim=None, skipna=None, **kwargs):
             return self.reduce(
-                func,
-                dim,
-                skipna=skipna,
-                numeric_only=numeric_only,
-                allow_lazy=True,
-                **kwargs,
+                func, dim, skipna=skipna, numeric_only=numeric_only, **kwargs
             )
 
     else:
 
         def wrapped_func(self, dim=None, **kwargs):  # type: ignore
-            return self.reduce(
-                func, dim, numeric_only=numeric_only, allow_lazy=True, **kwargs
-            )
+            return self.reduce(func, dim, numeric_only=numeric_only, **kwargs)
 
     return wrapped_func
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index 5e164f420c8..a192fe08cee 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -755,7 +755,9 @@ def reset_coords(
         return dataset
 
     def __dask_tokenize__(self):
-        return (type(self), self._variable, self._coords, self._name)
+        from dask.base import normalize_token
+
+        return normalize_token((type(self), self._variable, self._coords, self._name))
 
     def __dask_graph__(self):
         return self._to_temp_dataset().__dask_graph__()
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index dc5a315e72a..15a7209ab24 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -652,7 +652,11 @@ def load(self, **kwargs) -> "Dataset":
         return self
 
     def __dask_tokenize__(self):
-        return (type(self), self._variables, self._coord_names, self._attrs)
+        from dask.base import normalize_token
+
+        return normalize_token(
+            (type(self), self._variables, self._coord_names, self._attrs)
+        )
 
     def __dask_graph__(self):
         graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
@@ -4027,7 +4031,7 @@ def reduce(
         keep_attrs: bool = None,
         keepdims: bool = False,
         numeric_only: bool = False,
-        allow_lazy: bool = False,
+        allow_lazy: bool = None,
         **kwargs: Any,
     ) -> "Dataset":
         """Reduce this dataset by applying `func` along some dimension(s).
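Routing ``__dask_tokenize__`` through ``dask.base.normalize_token`` in the hunks above makes tokens recurse into the underlying buffers instead of relying on anything ``str()``-like; the ``test_recursive_token`` test added later in this diff pins this down. The core property, shown with plain numpy arrays::

    import dask.base
    import numpy as np

    a = np.ones(10000)
    b = np.ones(10000)
    b[5000] = 2

    assert str(a) == str(b)  # the summarized repr hides the differing element
    assert dask.base.tokenize(a) != dask.base.tokenize(b)  # the token does not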
diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py
index 8ae65d9b9df..c73ee3cf7c5 100644
--- a/xarray/core/groupby.py
+++ b/xarray/core/groupby.py
@@ -585,9 +585,7 @@ def _first_or_last(self, op, skipna, keep_attrs):
             return self._obj
         if keep_attrs is None:
             keep_attrs = _get_keep_attrs(default=True)
-        return self.reduce(
-            op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs, allow_lazy=True
-        )
+        return self.reduce(op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs)
 
     def first(self, skipna=None, keep_attrs=None):
         """Return the first element of each group along the group dimension
diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py
index f4e571a8efe..a1864332f4d 100644
--- a/xarray/core/rolling.py
+++ b/xarray/core/rolling.py
@@ -1,4 +1,5 @@
 import functools
+import warnings
 from typing import Callable
 
 import numpy as np
@@ -351,6 +352,14 @@ def _bottleneck_reduce(self, func, **kwargs):
     def _numpy_or_bottleneck_reduce(
         self, array_agg_func, bottleneck_move_func, **kwargs
     ):
+        if "dim" in kwargs:
+            warnings.warn(
+                f"Reductions will be applied along the rolling dimension '{self.dim}'. Passing the 'dim' kwarg to reduction operations has no effect and will raise an error in xarray 0.16.0.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+            del kwargs["dim"]
+
         if bottleneck_move_func is not None and not isinstance(
             self.obj.data, dask_array_type
         ):
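With the rolling change above, passing ``dim`` to a rolling reduction now warns instead of being silently ignored. A quick sketch of the new behavior::

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(10.0), dims="time")
    rolling = da.rolling(time=3)
    rolling.mean()            # reduces along "time", fixed when .rolling() was called
    rolling.mean(dim="time")  # redundant: emits the DeprecationWarning added above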
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 916df75b3e0..cf97c997017 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -1,5 +1,6 @@
 import functools
 import itertools
+import warnings
 from collections import defaultdict
 from datetime import timedelta
 from distutils.version import LooseVersion
@@ -393,7 +394,9 @@ def compute(self, **kwargs):
     def __dask_tokenize__(self):
         # Use v.data, instead of v._data, in order to cope with the wrappers
         # around NetCDF and the like
-        return type(self), self._dims, self.data, self._attrs
+        from dask.base import normalize_token
+
+        return normalize_token((type(self), self._dims, self.data, self._attrs))
 
     def __dask_graph__(self):
         if isinstance(self._data, dask_array_type):
@@ -1425,7 +1428,7 @@ def reduce(
         axis=None,
         keep_attrs=None,
         keepdims=False,
-        allow_lazy=False,
+        allow_lazy=None,
         **kwargs,
     ):
         """Reduce this array by applying `func` along some dimension(s).
@@ -1466,7 +1469,17 @@ def reduce(
         if dim is not None:
             axis = self.get_axis_num(dim)
+
+        if allow_lazy is not None:
+            warnings.warn(
+                "allow_lazy is deprecated and will be removed in version 0.16.0. It is now True by default.",
+                DeprecationWarning,
+            )
+        else:
+            allow_lazy = True
+
         input_data = self.data if allow_lazy else self.values
+
         if axis is not None:
             data = func(input_data, axis=axis, **kwargs)
         else:
@@ -1973,8 +1986,10 @@ def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
             self._data = PandasIndexAdapter(self._data)
 
     def __dask_tokenize__(self):
+        from dask.base import normalize_token
+
         # Don't waste time converting pd.Index to np.ndarray
-        return (type(self), self._dims, self._data.array, self._attrs)
+        return normalize_token((type(self), self._dims, self._data.array, self._attrs))
 
     def load(self):
         # data is already loaded into memory for IndexVariable
diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py
index fa8ae9991d7..4c1f317342f 100644
--- a/xarray/tests/test_dask.py
+++ b/xarray/tests/test_dask.py
@@ -12,6 +12,7 @@
 import xarray as xr
 import xarray.ufuncs as xu
 from xarray import DataArray, Dataset, Variable
+from xarray.core import duck_array_ops
 from xarray.testing import assert_chunks_equal
 from xarray.tests import mock
 
@@ -217,6 +218,8 @@ def test_reduce(self):
         self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x"))
         with raises_regex(NotImplementedError, "dask"):
             v.median()
+        with raise_if_dask_computes():
+            v.reduce(duck_array_ops.mean)
 
     def test_missing_values(self):
         values = np.array([0, 1, np.nan, 3])
@@ -488,7 +491,17 @@ def test_groupby(self):
         v = self.lazy_array
 
         expected = u.groupby("x").mean(...)
-        actual = v.groupby("x").mean(...)
+        with raise_if_dask_computes():
+            actual = v.groupby("x").mean(...)
+        self.assertLazyAndAllClose(expected, actual)
+
+    def test_rolling(self):
+        u = self.eager_array
+        v = self.lazy_array
+
+        expected = u.rolling(x=2).mean()
+        with raise_if_dask_computes():
+            actual = v.rolling(x=2).mean()
         self.assertLazyAndAllClose(expected, actual)
 
     def test_groupby_first(self):
@@ -500,7 +513,8 @@ def test_groupby_first(self):
         with raises_regex(NotImplementedError, "dask"):
             v.groupby("ab").first()
         expected = u.groupby("ab").first()
-        actual = v.groupby("ab").first(skipna=False)
+        with raise_if_dask_computes():
+            actual = v.groupby("ab").first(skipna=False)
         self.assertLazyAndAllClose(expected, actual)
 
     def test_reindex(self):
@@ -1283,6 +1297,32 @@ def test_token_identical(obj, transform):
     )
 
 
+def test_recursive_token():
+    """Test that tokenization is invoked recursively, and doesn't just rely on the
+    output of str()
+    """
+    a = np.ones(10000)
+    b = np.ones(10000)
+    b[5000] = 2
+    assert str(a) == str(b)
+    assert dask.base.tokenize(a) != dask.base.tokenize(b)
+
+    # Test DataArray and Variable
+    da_a = DataArray(a)
+    da_b = DataArray(b)
+    assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
+
+    # Test Dataset
+    ds_a = da_a.to_dataset(name="x")
+    ds_b = da_b.to_dataset(name="x")
+    assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)
+
+    # Test IndexVariable
+    da_a = DataArray(a, dims=["x"], coords={"x": a})
+    da_b = DataArray(a, dims=["x"], coords={"x": b})
+    assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
+
+
 @requires_scipy_or_netCDF4
 def test_normalize_token_with_backend(map_ds):
     with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
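Taken together, the ``variable.py`` hunks above make reductions keep dask-backed data lazy by default and only warn when callers still pass ``allow_lazy``. A sketch, assuming dask is installed::

    import numpy as np
    import xarray as xr

    v = xr.Variable(["x"], np.arange(6.0)).chunk(3)
    v.mean()                 # stays lazy; no dask compute is triggered
    v.mean(allow_lazy=True)  # still accepted, but now emits a DeprecationWarning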
diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py
index 42fae2c9dd4..7c6dc1825a1 100644
--- a/xarray/tests/test_dataarray.py
+++ b/xarray/tests/test_dataarray.py
@@ -4188,6 +4188,9 @@ def test_rolling_wrapped_bottleneck(da, name, center, min_periods):
     )
     assert_array_equal(actual.values, expected)
 
+    with pytest.warns(DeprecationWarning, match="Reductions will be applied"):
+        getattr(rolling_obj, name)(dim="time")
+
     # Test center
     rolling_obj = da.rolling(time=7, center=center)
     actual = getattr(rolling_obj, name)()["time"]
@@ -4203,6 +4206,9 @@ def test_rolling_wrapped_dask(da_dask, name, center, min_periods, window):
     # dask version
     rolling_obj = da_dask.rolling(time=window, min_periods=min_periods, center=center)
     actual = getattr(rolling_obj, name)().load()
+    if name != "count":
+        with pytest.warns(DeprecationWarning, match="Reductions will be applied"):
+            getattr(rolling_obj, name)(dim="time")
     # numpy version
     rolling_obj = da_dask.load().rolling(
         time=window, min_periods=min_periods, center=center
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index d001c43da94..67d3b3198dc 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -90,6 +90,14 @@ def create_append_test_data(seed=None):
     string_var = np.array(["ae", "bc", "df"], dtype=object)
     string_var_to_append = np.array(["asdf", "asdfg"], dtype=object)
     unicode_var = ["áó", "áó", "áó"]
+    datetime_var = np.array(
+        ["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[s]"
+    )
+    datetime_var_to_append = np.array(
+        ["2019-01-04", "2019-01-05"], dtype="datetime64[s]"
+    )
+    bool_var = np.array([True, False, True], dtype=np.bool)
+    bool_var_to_append = np.array([False, True], dtype=np.bool)
 
     ds = xr.Dataset(
         data_vars={
@@ -102,6 +110,8 @@ def create_append_test_data(seed=None):
             "unicode_var": xr.DataArray(
                 unicode_var, coords=[time1], dims=["time"]
             ).astype(np.unicode_),
+            "datetime_var": xr.DataArray(datetime_var, coords=[time1], dims=["time"]),
+            "bool_var": xr.DataArray(bool_var, coords=[time1], dims=["time"]),
         }
     )
 
@@ -118,6 +128,10 @@ def create_append_test_data(seed=None):
             "unicode_var": xr.DataArray(
                 unicode_var[:nt2], coords=[time2], dims=["time"]
             ).astype(np.unicode_),
+            "datetime_var": xr.DataArray(
+                datetime_var_to_append, coords=[time2], dims=["time"]
+            ),
+            "bool_var": xr.DataArray(bool_var_to_append, coords=[time2], dims=["time"]),
         }
     )
diff --git a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py
index a31da162487..a02fef2faeb 100644
--- a/xarray/tests/test_sparse.py
+++ b/xarray/tests/test_sparse.py
@@ -856,6 +856,10 @@ def test_dask_token():
     import dask
 
     s = sparse.COO.from_numpy(np.array([0, 0, 1, 2]))
+
+    # https://github.com/pydata/sparse/issues/300
+    s.__dask_tokenize__ = lambda: dask.base.normalize_token(s.__dict__)
+
     a = DataArray(s)
     t1 = dask.base.tokenize(a)
     t2 = dask.base.tokenize(a)
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index 528027ed149..d394919dbdd 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -1477,6 +1477,10 @@ def test_reduce(self):
         with raises_regex(ValueError, "cannot supply both"):
             v.mean(dim="x", axis=0)
+        with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
+            v.mean(dim="x", allow_lazy=True)
+        with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
+            v.mean(dim="x", allow_lazy=False)
 
     def test_quantile(self):
         v = Variable(["x", "y"], self.d)
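The monkeypatch in ``test_sparse.py`` above deserves a gloss: ``sparse.COO`` lacked a deterministic ``__dask_tokenize__`` at the time (pydata/sparse#300), so the test installs one on the instance. The same workaround, standalone and assuming the ``sparse`` and ``dask`` packages are available::

    import dask.base
    import numpy as np
    import sparse

    s = sparse.COO.from_numpy(np.array([0, 0, 1, 2]))
    s.__dask_tokenize__ = lambda: dask.base.normalize_token(s.__dict__)
    assert dask.base.tokenize(s) == dask.base.tokenize(s)  # now deterministic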