From 56bf01cf91782e4e7bc5b947af6634ccf6c5169f Mon Sep 17 00:00:00 2001 From: Richard Kleijn Date: Sat, 2 Jul 2022 22:49:46 +0200 Subject: [PATCH 1/8] improve typing of DataArray and Dataset reductions --- xarray/core/_aggregations.py | 1244 +------------------------- xarray/util/generate_aggregations.py | 11 +- 2 files changed, 38 insertions(+), 1217 deletions(-) diff --git a/xarray/core/_aggregations.py b/xarray/core/_aggregations.py index d5070f97c6a..3315a6a68e5 100644 --- a/xarray/core/_aggregations.py +++ b/xarray/core/_aggregations.py @@ -6,6 +6,8 @@ from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Callable +from typing_extensions import Self + from xarray.core import duck_array_ops from xarray.core.options import OPTIONS from xarray.core.types import Dims @@ -30,7 +32,7 @@ def reduce( keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, - ) -> Dataset: + ) -> Self: raise NotImplementedError() def count( @@ -39,7 +41,7 @@ def count( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). @@ -83,19 +85,8 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.count() - - Dimensions: () - Data variables: - da int64 5 """ return self.reduce( duck_array_ops.count, @@ -111,7 +102,7 @@ def all( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). @@ -155,19 +146,8 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.all() - - Dimensions: () - Data variables: - da bool False """ return self.reduce( duck_array_ops.array_all, @@ -183,7 +163,7 @@ def any( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). @@ -227,19 +207,8 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.any() - - Dimensions: () - Data variables: - da bool True """ return self.reduce( duck_array_ops.array_any, @@ -256,7 +225,7 @@ def max( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). @@ -305,27 +274,12 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.max() - - Dimensions: () - Data variables: - da float64 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.max(skipna=False) - - Dimensions: () - Data variables: - da float64 nan """ return self.reduce( duck_array_ops.max, @@ -343,7 +297,7 @@ def min( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). @@ -392,27 +346,12 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.min() - - Dimensions: () - Data variables: - da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.min(skipna=False) - - Dimensions: () - Data variables: - da float64 nan """ return self.reduce( duck_array_ops.min, @@ -430,7 +369,7 @@ def mean( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). @@ -483,27 +422,12 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.mean() - - Dimensions: () - Data variables: - da float64 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> ds.mean(skipna=False) - - Dimensions: () - Data variables: - da float64 nan """ return self.reduce( duck_array_ops.mean, @@ -522,7 +446,7 @@ def prod( min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). @@ -581,35 +505,16 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.prod() - - Dimensions: () - Data variables: - da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.prod(skipna=False) - - Dimensions: () - Data variables: - da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.prod(skipna=True, min_count=2) - - Dimensions: () - Data variables: - da float64 0.0 """ return self.reduce( duck_array_ops.prod, @@ -629,7 +534,7 @@ def sum( min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). @@ -688,35 +593,16 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.sum() - - Dimensions: () - Data variables: - da float64 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.sum(skipna=False) - - Dimensions: () - Data variables: - da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.sum(skipna=True, min_count=2) - - Dimensions: () - Data variables: - da float64 8.0 """ return self.reduce( duck_array_ops.sum, @@ -736,7 +622,7 @@ def std( ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). @@ -792,35 +678,16 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.std() - - Dimensions: () - Data variables: - da float64 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> ds.std(skipna=False) - - Dimensions: () - Data variables: - da float64 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.std(skipna=True, ddof=1) - - Dimensions: () - Data variables: - da float64 1.14 """ return self.reduce( duck_array_ops.std, @@ -840,7 +707,7 @@ def var( ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). 
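Note on the signature changes in the hunks above: returning ``Self`` instead of the concrete ``Dataset`` lets type checkers carry subclass types through every reduction. A minimal sketch of the effect, using invented class names rather than code from this patch:

    from typing import Self  # Python >= 3.11; otherwise: from typing_extensions import Self

    class Aggregations:
        def count(self) -> Self:  # previously annotated with the concrete base class
            return self

    class CustomDataset(Aggregations):
        pass

    # With ``-> Self`` mypy infers ``CustomDataset`` here; with the old
    # concrete annotation it would infer ``Aggregations`` and reject this.
    obj: CustomDataset = CustomDataset().count()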
@@ -896,35 +763,16 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.var() - - Dimensions: () - Data variables: - da float64 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> ds.var(skipna=False) - - Dimensions: () - Data variables: - da float64 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.var(skipna=True, ddof=1) - - Dimensions: () - Data variables: - da float64 1.3 """ return self.reduce( duck_array_ops.var, @@ -943,7 +791,7 @@ def median( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). @@ -996,27 +844,12 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.median() - - Dimensions: () - Data variables: - da float64 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.median(skipna=False) - - Dimensions: () - Data variables: - da float64 nan """ return self.reduce( duck_array_ops.median, @@ -1034,7 +867,7 @@ def cumsum( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). @@ -1087,29 +920,12 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.cumsum() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumsum(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -1127,7 +943,7 @@ def cumprod( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> Dataset: + ) -> Self: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). @@ -1180,29 +996,12 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.cumprod() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumprod(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -1226,7 +1025,7 @@ def reduce( keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, - ) -> DataArray: + ) -> Self: raise NotImplementedError() def count( @@ -1235,7 +1034,7 @@ def count( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). @@ -1278,15 +1077,8 @@ def count( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> da.count() - - array(5) """ return self.reduce( duck_array_ops.count, @@ -1301,7 +1093,7 @@ def all( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). @@ -1344,15 +1136,8 @@ def all( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.all() - - array(False) """ return self.reduce( duck_array_ops.array_all, @@ -1367,7 +1152,7 @@ def any( *, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). @@ -1410,15 +1195,8 @@ def any( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.any() - - array(True) """ return self.reduce( duck_array_ops.array_any, @@ -1434,7 +1212,7 @@ def max( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). @@ -1482,21 +1260,12 @@ def max( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.max() - - array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> da.max(skipna=False) - - array(nan) """ return self.reduce( duck_array_ops.max, @@ -1513,7 +1282,7 @@ def min( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). @@ -1561,21 +1330,12 @@ def min( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.min() - - array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.min(skipna=False) - - array(nan) """ return self.reduce( duck_array_ops.min, @@ -1592,7 +1352,7 @@ def mean( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). @@ -1644,21 +1404,12 @@ def mean( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.mean() - - array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> da.mean(skipna=False) - - array(nan) """ return self.reduce( duck_array_ops.mean, @@ -1676,7 +1427,7 @@ def prod( min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). @@ -1734,27 +1485,16 @@ def prod( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.prod() - - array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.prod(skipna=False) - - array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.prod(skipna=True, min_count=2) - - array(0.) 
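The ``min_count`` behaviour referenced in the docstrings above reduces to a simple rule: the result is NaN whenever fewer than ``min_count`` valid values contribute. An illustrative snippet (not part of the patch):

    import numpy as np
    import xarray as xr

    da = xr.DataArray([1.0, np.nan], dims="x")
    da.sum(skipna=True, min_count=1)  # 1.0 -- one valid value suffices
    da.sum(skipna=True, min_count=2)  # nan -- only one valid value present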
""" return self.reduce( duck_array_ops.prod, @@ -1773,7 +1513,7 @@ def sum( min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). @@ -1831,27 +1571,16 @@ def sum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.sum() - - array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> da.sum(skipna=False) - - array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.sum(skipna=True, min_count=2) - - array(8.) """ return self.reduce( duck_array_ops.sum, @@ -1870,7 +1599,7 @@ def std( ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). @@ -1925,27 +1654,16 @@ def std( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.std() - - array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> da.std(skipna=False) - - array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.std(skipna=True, ddof=1) - - array(1.14017543) """ return self.reduce( duck_array_ops.std, @@ -1964,7 +1682,7 @@ def var( ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). @@ -2019,27 +1737,16 @@ def var( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.var() - - array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> da.var(skipna=False) - - array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.var(skipna=True, ddof=1) - - array(1.3) """ return self.reduce( duck_array_ops.var, @@ -2057,7 +1764,7 @@ def median( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). @@ -2109,21 +1816,12 @@ def median( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.median() - - array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> da.median(skipna=False) - - array(nan) """ return self.reduce( duck_array_ops.median, @@ -2140,7 +1838,7 @@ def cumsum( skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). @@ -2192,27 +1890,12 @@ def cumsum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumsum() - - array([1., 3., 6., 6., 8., 8.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumsum(skipna=False) - - array([ 1., 3., 6., 6., 8., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) DataArray: + ) -> Self: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). @@ -2281,27 +1964,12 @@ def cumprod( ... ), ... 
) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumprod() - - array([1., 2., 6., 0., 0., 0.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumprod(skipna=False) - - array([ 1., 2., 6., 0., 0., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").count() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) int64 1 2 2 """ if ( flox_available @@ -2492,21 +2147,8 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").all() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) bool False True True """ if ( flox_available @@ -2590,21 +2232,8 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").any() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) bool True True True """ if ( flox_available @@ -2694,31 +2323,12 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").max() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 2.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").max(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 2.0 3.0 """ if ( flox_available @@ -2810,31 +2420,12 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").min() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 2.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").min(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 2.0 0.0 """ if ( flox_available @@ -2928,31 +2519,12 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").mean() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").mean(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 2.0 1.5 """ if ( flox_available @@ -3053,41 +2625,16 @@ def prod( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").prod() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 4.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").prod(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 4.0 0.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").prod(skipna=True, min_count=2) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 4.0 0.0 """ if ( flox_available @@ -3190,41 +2737,16 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").sum() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 4.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").sum(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 4.0 3.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").sum(skipna=True, min_count=2) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 4.0 3.0 """ if ( flox_available @@ -3324,41 +2846,16 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").std() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 0.0 0.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").std(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 0.0 1.5 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").std(skipna=True, ddof=1) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 0.0 2.121 """ if ( flox_available @@ -3458,41 +2955,16 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").var() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 0.0 0.0 2.25 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").var(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 0.0 2.25 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").var(skipna=True, ddof=1) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 0.0 4.5 """ if ( flox_available @@ -3588,31 +3060,12 @@ def median( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").median() - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").median(skipna=False) - - Dimensions: (labels: 3) - Coordinates: - * labels (labels) object 'a' 'b' 'c' - Data variables: - da (labels) float64 nan 2.0 1.5 """ return self.reduce( duck_array_ops.median, @@ -3691,29 +3144,12 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").cumsum() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 3.0 3.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumsum(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 3.0 3.0 4.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -3792,29 +3228,12 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").cumprod() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 3.0 0.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumprod(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 3.0 0.0 4.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -3908,21 +3327,8 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").count() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) int64 1 3 1 """ if ( flox_available @@ -4006,21 +3412,8 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").all() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) bool True True False """ if ( flox_available @@ -4104,21 +3497,8 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").any() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) bool True True True """ if ( flox_available @@ -4208,31 +3588,12 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.resample(time="3M").max() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 3.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").max(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 3.0 nan """ if ( flox_available @@ -4324,31 +3685,12 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").min() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").min(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 0.0 nan """ if ( flox_available @@ -4442,31 +3784,12 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").mean() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 1.667 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").mean(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 1.667 nan """ if ( flox_available @@ -4567,41 +3890,16 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").prod() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").prod(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 0.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3M").prod(skipna=True, min_count=2) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 nan 0.0 nan """ if ( flox_available @@ -4704,41 +4002,16 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").sum() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 5.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").sum(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 5.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> ds.resample(time="3M").sum(skipna=True, min_count=2) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 nan 5.0 nan """ if ( flox_available @@ -4838,41 +4111,16 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").std() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 0.0 1.247 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").std(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 0.0 1.247 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3M").std(skipna=True, ddof=1) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 nan 1.528 nan """ if ( flox_available @@ -4972,41 +4220,16 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").var() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 0.0 1.556 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").var(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 0.0 1.556 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3M").var(skipna=True, ddof=1) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 nan 2.333 nan """ if ( flox_available @@ -5102,31 +4325,12 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").median() - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").median(skipna=False) - - Dimensions: (time: 3) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 - Data variables: - da (time) float64 1.0 2.0 nan """ return self.reduce( duck_array_ops.median, @@ -5205,29 +4409,12 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").cumsum() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 5.0 5.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").cumsum(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 5.0 5.0 2.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -5306,29 +4493,12 @@ def cumprod( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - - Dimensions: (time: 6) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3M").cumprod() - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 6.0 0.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").cumprod(skipna=False) - - Dimensions: (time: 6) - Dimensions without coordinates: time - Data variables: - da (time) float64 1.0 2.0 6.0 0.0 2.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -5421,17 +4591,8 @@ def count( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").count() - - array([1, 2, 2]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5512,17 +4673,8 @@ def all( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").all() - - array([False, True, True]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5603,17 +4755,8 @@ def any( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").any() - - array([ True, True, True]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5700,25 +4843,12 @@ def max( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").max() - - array([1., 2., 3.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").max(skipna=False) - - array([nan, 2., 3.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5807,25 +4937,12 @@ def min( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").min() - - array([1., 2., 0.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").min(skipna=False) - - array([nan, 2., 0.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5916,25 +5033,12 @@ def mean( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").mean() - - array([1. , 2. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").mean(skipna=False) - - array([nan, 2. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6032,33 +5136,16 @@ def prod( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").prod() - - array([1., 4., 0.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. 
>>> da.groupby("labels").prod(skipna=False) - - array([nan, 4., 0.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.groupby("labels").prod(skipna=True, min_count=2) - - array([nan, 4., 0.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6158,33 +5245,16 @@ def sum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").sum() - - array([1., 4., 3.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").sum(skipna=False) - - array([nan, 4., 3.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.groupby("labels").sum(skipna=True, min_count=2) - - array([nan, 4., 3.]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6281,33 +5351,16 @@ def std( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").std() - - array([0. , 0. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").std(skipna=False) - - array([nan, 0. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").std(skipna=True, ddof=1) - - array([ nan, 0. , 2.12132034]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6404,33 +5457,16 @@ def var( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").var() - - array([0. , 0. , 2.25]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").var(skipna=False) - - array([ nan, 0. , 2.25]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").var(skipna=True, ddof=1) - - array([nan, 0. , 4.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6523,25 +5559,12 @@ def median( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").median() - - array([1. , 2. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").median(skipna=False) - - array([nan, 2. , 1.5]) - Coordinates: - * labels (labels) object 'a' 'b' 'c' """ return self.reduce( duck_array_ops.median, @@ -6618,27 +5641,12 @@ def cumsum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumsum() - - array([1., 2., 3., 3., 4., 1.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumsum(skipna=False) - - array([ 1., 2., 3., 3., 4., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumprod() - - array([1., 2., 3., 0., 4., 1.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumprod(skipna=False) - - array([ 1., 2., 3., 0., 4., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").count() - - array([1, 3, 1]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6918,17 +5902,8 @@ def all( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").all() - - array([ True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7009,17 +5984,8 @@ def any( ... ), ... ) >>> da - - array([ True, True, True, True, True, False]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").any() - - array([ True, True, True]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7106,25 +6072,12 @@ def max( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").max() - - array([1., 3., 2.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").max(skipna=False) - - array([ 1., 3., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7213,25 +6166,12 @@ def min( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").min() - - array([1., 0., 2.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").min(skipna=False) - - array([ 1., 0., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7322,25 +6262,12 @@ def mean( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").mean() - - array([1. , 1.66666667, 2. ]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").mean(skipna=False) - - array([1. , 1.66666667, nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7438,33 +6365,16 @@ def prod( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> da.resample(time="3M").prod() - - array([1., 0., 2.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").prod(skipna=False) - - array([ 1., 0., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3M").prod(skipna=True, min_count=2) - - array([nan, 0., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7564,33 +6474,16 @@ def sum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").sum() - - array([1., 5., 2.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").sum(skipna=False) - - array([ 1., 5., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3M").sum(skipna=True, min_count=2) - - array([nan, 5., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7687,33 +6580,16 @@ def std( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").std() - - array([0. , 1.24721913, 0. ]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").std(skipna=False) - - array([0. , 1.24721913, nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3M").std(skipna=True, ddof=1) - - array([ nan, 1.52752523, nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7810,33 +6686,16 @@ def var( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").var() - - array([0. , 1.55555556, 0. ]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").var(skipna=False) - - array([0. , 1.55555556, nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3M").var(skipna=True, ddof=1) - - array([ nan, 2.33333333, nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7929,25 +6788,12 @@ def median( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").median() - - array([1., 2., 2.]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
>>> da.resample(time="3M").median(skipna=False) - - array([ 1., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ return self.reduce( duck_array_ops.median, @@ -8024,27 +6870,12 @@ def cumsum( ... ), ... ) >>> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").cumsum() - - array([1., 2., 5., 5., 2., 2.]) - Coordinates: - labels (time) >> da.resample(time="3M").cumsum(skipna=False) - - array([ 1., 2., 5., 5., 2., nan]) - Coordinates: - labels (time) >> da - - array([ 1., 2., 3., 0., 2., nan]) - Coordinates: - * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3M").cumprod() - - array([1., 2., 6., 0., 2., 2.]) - Coordinates: - labels (time) >> da.resample(time="3M").cumprod(skipna=False) - - array([ 1., 2., 6., 0., 2., nan]) - Coordinates: - labels (time) {obj}: + ) -> Self: raise NotImplementedError()""" GROUPBY_PREAMBLE = """ @@ -108,7 +110,7 @@ def {method}( *,{extra_kwargs} keep_attrs: bool | None = None, **kwargs: Any, - ) -> {obj}: + ) -> Self: """ Reduce this {obj}'s data by applying ``{method}`` along some dimension(s). @@ -296,7 +298,10 @@ def generate_methods(self): yield self.generate_method(method) def generate_method(self, method): - template_kwargs = dict(obj=self.datastructure.name, method=method.name) + template_kwargs = dict( + obj=self.datastructure.name, + method=method.name, + ) if method.extra_kwargs: extra_kwargs = "\n " + "\n ".join( From 379bf9d072ec3e9244e0b8c7555931dcf5406810 Mon Sep 17 00:00:00 2001 From: Richard Kleijn Date: Sun, 3 Jul 2022 10:47:10 +0200 Subject: [PATCH 2/8] fix some mypy errors --- xarray/core/rolling.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 916fabe42ac..55901d56424 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -4,7 +4,7 @@ import itertools import math import warnings -from collections.abc import Hashable, Iterator, Mapping +from collections.abc import Hashable, Iterator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar import numpy as np @@ -467,9 +467,8 @@ def reduce( obj, rolling_dim, keep_attrs=keep_attrs, fill_value=fillna ) - result = windows.reduce( - func, dim=list(rolling_dim.values()), keep_attrs=keep_attrs, **kwargs - ) + dim: Sequence[Hashable] = list(rolling_dim.values()) + result = windows.reduce(func, dim=dim, keep_attrs=keep_attrs, **kwargs) # Find valid windows based on count. counts = self._counts(keep_attrs=False) @@ -486,6 +485,7 @@ def _counts(self, keep_attrs: bool | None) -> DataArray: # array is faster to be reduced than object array. # The use of skipna==False is also faster since it does not need to # copy the strided array. 
+ dim: Sequence[Hashable] = list(rolling_dim.values()) counts = ( self.obj.notnull(keep_attrs=keep_attrs) .rolling( @@ -493,7 +493,7 @@ def _counts(self, keep_attrs: bool | None) -> DataArray: center={d: self.center[i] for i, d in enumerate(self.dim)}, ) .construct(rolling_dim, fill_value=False, keep_attrs=keep_attrs) - .sum(dim=list(rolling_dim.values()), skipna=False, keep_attrs=keep_attrs) + .sum(dim=dim, skipna=False, keep_attrs=keep_attrs) ) return counts From b44b2ae2c529663163f32fb44abbcb6ff0962f41 Mon Sep 17 00:00:00 2001 From: Richard Kleijn Date: Fri, 18 Aug 2023 02:13:55 +0200 Subject: [PATCH 3/8] import Self from xarray.core.types --- xarray/core/_aggregations.py | 1190 +++++++++++++++++++++++++- xarray/util/generate_aggregations.py | 9 +- 2 files changed, 1189 insertions(+), 10 deletions(-) diff --git a/xarray/core/_aggregations.py b/xarray/core/_aggregations.py index 3315a6a68e5..89cec94e24f 100644 --- a/xarray/core/_aggregations.py +++ b/xarray/core/_aggregations.py @@ -6,11 +6,9 @@ from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Callable -from typing_extensions import Self - from xarray.core import duck_array_ops from xarray.core.options import OPTIONS -from xarray.core.types import Dims +from xarray.core.types import Dims, Self from xarray.core.utils import contains_only_chunked_or_numpy, module_available if TYPE_CHECKING: @@ -85,8 +83,19 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.count() + + Dimensions: () + Data variables: + da int64 5 """ return self.reduce( duck_array_ops.count, @@ -146,8 +155,19 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.all() + + Dimensions: () + Data variables: + da bool False """ return self.reduce( duck_array_ops.array_all, @@ -207,8 +227,19 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.any() + + Dimensions: () + Data variables: + da bool True """ return self.reduce( duck_array_ops.array_any, @@ -274,12 +305,27 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.max() + + Dimensions: () + Data variables: + da float64 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.max(skipna=False) + + Dimensions: () + Data variables: + da float64 nan """ return self.reduce( duck_array_ops.max, @@ -346,12 +392,27 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.min() + + Dimensions: () + Data variables: + da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.min(skipna=False) + + Dimensions: () + Data variables: + da float64 nan """ return self.reduce( duck_array_ops.min, @@ -422,12 +483,27 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.mean() + + Dimensions: () + Data variables: + da float64 1.6 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.mean(skipna=False) + + Dimensions: () + Data variables: + da float64 nan """ return self.reduce( duck_array_ops.mean, @@ -505,16 +581,35 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.prod() + + Dimensions: () + Data variables: + da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.prod(skipna=False) + + Dimensions: () + Data variables: + da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.prod(skipna=True, min_count=2) + + Dimensions: () + Data variables: + da float64 0.0 """ return self.reduce( duck_array_ops.prod, @@ -593,16 +688,35 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.sum() + + Dimensions: () + Data variables: + da float64 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.sum(skipna=False) + + Dimensions: () + Data variables: + da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.sum(skipna=True, min_count=2) + + Dimensions: () + Data variables: + da float64 8.0 """ return self.reduce( duck_array_ops.sum, @@ -678,16 +792,35 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.std() + + Dimensions: () + Data variables: + da float64 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> ds.std(skipna=False) + + Dimensions: () + Data variables: + da float64 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.std(skipna=True, ddof=1) + + Dimensions: () + Data variables: + da float64 1.14 """ return self.reduce( duck_array_ops.std, @@ -763,16 +896,35 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.var() + + Dimensions: () + Data variables: + da float64 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> ds.var(skipna=False) + + Dimensions: () + Data variables: + da float64 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.var(skipna=True, ddof=1) + + Dimensions: () + Data variables: + da float64 1.3 """ return self.reduce( duck_array_ops.var, @@ -844,12 +996,27 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.median() + + Dimensions: () + Data variables: + da float64 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.median(skipna=False) + + Dimensions: () + Data variables: + da float64 nan """ return self.reduce( duck_array_ops.median, @@ -920,12 +1087,29 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.cumsum() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.cumsum(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -996,12 +1180,29 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.cumprod() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumprod(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -1077,8 +1278,15 @@ def count( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.count() + + array(5) """ return self.reduce( duck_array_ops.count, @@ -1136,8 +1344,15 @@ def all( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.all() + + array(False) """ return self.reduce( duck_array_ops.array_all, @@ -1195,8 +1410,15 @@ def any( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.any() + + array(True) """ return self.reduce( duck_array_ops.array_any, @@ -1260,12 +1482,21 @@ def max( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.max() + + array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> da.max(skipna=False) + + array(nan) """ return self.reduce( duck_array_ops.max, @@ -1330,12 +1561,21 @@ def min( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.min() + + array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.min(skipna=False) + + array(nan) """ return self.reduce( duck_array_ops.min, @@ -1404,12 +1644,21 @@ def mean( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.mean() + + array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> da.mean(skipna=False) + + array(nan) """ return self.reduce( duck_array_ops.mean, @@ -1485,16 +1734,27 @@ def prod( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.prod() + + array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.prod(skipna=False) + + array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.prod(skipna=True, min_count=2) + + array(0.) """ return self.reduce( duck_array_ops.prod, @@ -1571,16 +1831,27 @@ def sum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.sum() + + array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> da.sum(skipna=False) + + array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> da.sum(skipna=True, min_count=2) + + array(8.) """ return self.reduce( duck_array_ops.sum, @@ -1654,16 +1925,27 @@ def std( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.std() + + array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> da.std(skipna=False) + + array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.std(skipna=True, ddof=1) + + array(1.14017543) """ return self.reduce( duck_array_ops.std, @@ -1737,16 +2019,27 @@ def var( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.var() + + array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> da.var(skipna=False) + + array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.var(skipna=True, ddof=1) + + array(1.3) """ return self.reduce( duck_array_ops.var, @@ -1816,12 +2109,21 @@ def median( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.median() + + array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> da.median(skipna=False) + + array(nan) """ return self.reduce( duck_array_ops.median, @@ -1890,12 +2192,27 @@ def cumsum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.cumsum() + + array([1., 3., 6., 6., 8., 8.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.cumsum(skipna=False) + + array([ 1., 3., 6., 6., 8., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.cumprod() + + array([1., 2., 6., 0., 0., 0.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.cumprod(skipna=False) + + array([ 1., 2., 6., 0., 0., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").count() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) int64 1 2 2 """ if ( flox_available @@ -2147,8 +2492,21 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").all() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) bool False True True """ if ( flox_available @@ -2232,8 +2590,21 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").any() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) bool True True True """ if ( flox_available @@ -2323,12 +2694,31 @@ def max( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").max() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 2.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").max(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 2.0 3.0 """ if ( flox_available @@ -2420,12 +2810,31 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").min() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 2.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").min(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 2.0 0.0 """ if ( flox_available @@ -2519,12 +2928,31 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").mean() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").mean(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 2.0 1.5 """ if ( flox_available @@ -2625,16 +3053,41 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").prod() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 4.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").prod(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 4.0 0.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").prod(skipna=True, min_count=2) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 4.0 0.0 """ if ( flox_available @@ -2737,16 +3190,41 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").sum() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 4.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").sum(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 4.0 3.0 Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> ds.groupby("labels").sum(skipna=True, min_count=2) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 4.0 3.0 """ if ( flox_available @@ -2846,16 +3324,41 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").std() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 0.0 0.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").std(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 0.0 1.5 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").std(skipna=True, ddof=1) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 0.0 2.121 """ if ( flox_available @@ -2955,16 +3458,41 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").var() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 0.0 0.0 2.25 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").var(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 0.0 2.25 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").var(skipna=True, ddof=1) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 0.0 4.5 """ if ( flox_available @@ -3060,12 +3588,31 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").median() + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").median(skipna=False) + + Dimensions: (labels: 3) + Coordinates: + * labels (labels) object 'a' 'b' 'c' + Data variables: + da (labels) float64 nan 2.0 1.5 """ return self.reduce( duck_array_ops.median, @@ -3144,12 +3691,29 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.groupby("labels").cumsum() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 3.0 3.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumsum(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 3.0 3.0 4.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -3228,12 +3792,29 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 + labels (time) >> ds.groupby("labels").cumprod() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 3.0 0.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumprod(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 3.0 0.0 4.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -3327,8 +3908,21 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").count() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) int64 1 3 1 """ if ( flox_available @@ -3412,8 +4006,21 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").all() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) bool True True False """ if ( flox_available @@ -3497,8 +4104,21 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").any() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) bool True True True """ if ( flox_available @@ -3588,12 +4208,31 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").max() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 3.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").max(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 3.0 nan """ if ( flox_available @@ -3685,12 +4324,31 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").min() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").min(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 0.0 nan """ if ( flox_available @@ -3784,12 +4442,31 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").mean() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 1.667 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3M").mean(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 1.667 nan """ if ( flox_available @@ -3890,16 +4567,41 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").prod() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").prod(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 0.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3M").prod(skipna=True, min_count=2) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 nan 0.0 nan """ if ( flox_available @@ -4002,16 +4704,41 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").sum() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 5.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").sum(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 5.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3M").sum(skipna=True, min_count=2) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 nan 5.0 nan """ if ( flox_available @@ -4111,16 +4838,41 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").std() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 0.0 1.247 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").std(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 0.0 1.247 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3M").std(skipna=True, ddof=1) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 nan 1.528 nan """ if ( flox_available @@ -4220,16 +4972,41 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").var() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 0.0 1.556 0.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3M").var(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 0.0 1.556 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3M").var(skipna=True, ddof=1) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 nan 2.333 nan """ if ( flox_available @@ -4325,12 +5102,31 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").median() + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").median(skipna=False) + + Dimensions: (time: 3) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 + Data variables: + da (time) float64 1.0 2.0 nan """ return self.reduce( duck_array_ops.median, @@ -4409,12 +5205,29 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").cumsum() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 5.0 5.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").cumsum(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 5.0 5.0 2.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -4493,12 +5306,29 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds + + Dimensions: (time: 6) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> ds.resample(time="3M").cumprod() + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 6.0 0.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3M").cumprod(skipna=False) + + Dimensions: (time: 6) + Dimensions without coordinates: time + Data variables: + da (time) float64 1.0 2.0 6.0 0.0 2.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -4591,8 +5421,17 @@ def count( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").count() + + array([1, 2, 2]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -4673,8 +5512,17 @@ def all( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").all() + + array([False, True, True]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -4755,8 +5603,17 @@ def any( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").any() + + array([ True, True, True]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -4843,12 +5700,25 @@ def max( ... ), ... 
) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").max() + + array([1., 2., 3.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").max(skipna=False) + + array([nan, 2., 3.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -4937,12 +5807,25 @@ def min( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").min() + + array([1., 2., 0.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").min(skipna=False) + + array([nan, 2., 0.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5033,12 +5916,25 @@ def mean( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").mean() + + array([1. , 2. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").mean(skipna=False) + + array([nan, 2. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5136,16 +6032,33 @@ def prod( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").prod() + + array([1., 4., 0.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").prod(skipna=False) + + array([nan, 4., 0.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.groupby("labels").prod(skipna=True, min_count=2) + + array([nan, 4., 0.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5245,16 +6158,33 @@ def sum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").sum() + + array([1., 4., 3.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").sum(skipna=False) + + array([nan, 4., 3.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.groupby("labels").sum(skipna=True, min_count=2) + + array([nan, 4., 3.]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5351,16 +6281,33 @@ def std( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").std() + + array([0. , 0. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").std(skipna=False) + + array([nan, 0. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").std(skipna=True, ddof=1) + + array([ nan, 0. 
, 2.12132034]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5457,16 +6404,33 @@ def var( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").var() + + array([0. , 0. , 2.25]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").var(skipna=False) + + array([ nan, 0. , 2.25]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").var(skipna=True, ddof=1) + + array([nan, 0. , 4.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5559,12 +6523,25 @@ def median( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").median() + + array([1. , 2. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").median(skipna=False) + + array([nan, 2. , 1.5]) + Coordinates: + * labels (labels) object 'a' 'b' 'c' """ return self.reduce( duck_array_ops.median, @@ -5641,12 +6618,27 @@ def cumsum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").cumsum() + + array([1., 2., 3., 3., 4., 1.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").cumsum(skipna=False) + + array([ 1., 2., 3., 3., 4., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").cumprod() + + array([1., 2., 3., 0., 4., 1.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.groupby("labels").cumprod(skipna=False) + + array([ 1., 2., 3., 0., 4., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").count() + + array([1, 3, 1]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -5902,8 +6918,17 @@ def all( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").all() + + array([ True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -5984,8 +7009,17 @@ def any( ... ), ... ) >>> da + + array([ True, True, True, True, True, False]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").any() + + array([ True, True, True]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6072,12 +7106,25 @@ def max( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 + labels (time) >> da.resample(time="3M").max() + + array([1., 3., 2.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").max(skipna=False) + + array([ 1., 3., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6166,12 +7213,25 @@ def min( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").min() + + array([1., 0., 2.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").min(skipna=False) + + array([ 1., 0., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6262,12 +7322,25 @@ def mean( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").mean() + + array([1. , 1.66666667, 2. ]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").mean(skipna=False) + + array([1. , 1.66666667, nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6365,16 +7438,33 @@ def prod( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").prod() + + array([1., 0., 2.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").prod(skipna=False) + + array([ 1., 0., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3M").prod(skipna=True, min_count=2) + + array([nan, 0., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6474,16 +7564,33 @@ def sum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").sum() + + array([1., 5., 2.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").sum(skipna=False) + + array([ 1., 5., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3M").sum(skipna=True, min_count=2) + + array([nan, 5., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6580,16 +7687,33 @@ def std( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").std() + + array([0. , 1.24721913, 0. ]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").std(skipna=False) + + array([0. 
, 1.24721913, nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3M").std(skipna=True, ddof=1) + + array([ nan, 1.52752523, nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6686,16 +7810,33 @@ def var( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").var() + + array([0. , 1.55555556, 0. ]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").var(skipna=False) + + array([0. , 1.55555556, nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3M").var(skipna=True, ddof=1) + + array([ nan, 2.33333333, nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6788,12 +7929,25 @@ def median( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").median() + + array([1., 2., 2.]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3M").median(skipna=False) + + array([ 1., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ return self.reduce( duck_array_ops.median, @@ -6870,12 +8024,27 @@ def cumsum( ... ), ... ) >>> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 2001-06-30 + labels (time) >> da.resample(time="3M").cumsum() + + array([1., 2., 5., 5., 2., 2.]) + Coordinates: + labels (time) >> da.resample(time="3M").cumsum(skipna=False) + + array([ 1., 2., 5., 5., 2., nan]) + Coordinates: + labels (time) >> da + + array([ 1., 2., 3., 0., 2., nan]) + Coordinates: + * time (time) datetime64[ns] 2001-01-31 2001-02-28 ... 
2001-06-30 + labels (time) >> da.resample(time="3M").cumprod() + + array([1., 2., 6., 0., 2., 2.]) + Coordinates: + labels (time) >> da.resample(time="3M").cumprod(skipna=False) + + array([ 1., 2., 6., 0., 2., nan]) + Coordinates: + labels (time) Date: Fri, 18 Aug 2023 02:29:25 +0200 Subject: [PATCH 4/8] fix typing errors --- xarray/core/computation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 685307fc8c3..518ae2f3026 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1402,14 +1402,14 @@ def _cov_corr( ) / (valid_count) if method == "cov": - return cov # type: ignore[return-value] + return cov # type: ignore[return-value,unused-ignore] else: # compute std + corr da_a_std = da_a.std(dim=dim) da_b_std = da_b.std(dim=dim) corr = cov / (da_a_std * da_b_std) - return corr # type: ignore[return-value] + return corr # type: ignore[return-value,unused-ignore] def cross( From 377b359eb29cc82cf7f901c8382b46925c6f4d89 Mon Sep 17 00:00:00 2001 From: Richard Kleijn Date: Fri, 18 Aug 2023 02:38:34 +0200 Subject: [PATCH 5/8] fix remaining mypy error --- xarray/core/dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index bdf2d8babe1..ebd9611e2bf 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -500,7 +500,7 @@ def __setitem__(self, key, value) -> None: self.dataset[dim_indexers] = value -class Dataset( +class Dataset( # type: ignore[misc,unused-ignore] DataWithCoords, DatasetAggregations, DatasetArithmetic, From 433c59d3b758fcdabf58446f5f5847787db71765 Mon Sep 17 00:00:00 2001 From: Michael Niklas Date: Wed, 13 Sep 2023 21:12:24 +0200 Subject: [PATCH 6/8] fix some typing issues --- xarray/core/computation.py | 4 ++-- xarray/core/dataset.py | 2 +- xarray/core/rolling.py | 6 +++--- xarray/tests/test_groupby.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/xarray/core/computation.py b/xarray/core/computation.py index ef0bd8627c7..97a1e15b1d9 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1402,14 +1402,14 @@ def _cov_corr( ) / (valid_count) if method == "cov": - return cov # type: ignore[return-value,unused-ignore] + return cov else: # compute std + corr da_a_std = da_a.std(dim=dim) da_b_std = da_b.std(dim=dim) corr = cov / (da_a_std * da_b_std) - return corr # type: ignore[return-value,unused-ignore] + return corr def cross( diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 3749254c6d2..a96d1f34c98 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -502,7 +502,7 @@ def __setitem__(self, key, value) -> None: self.dataset[dim_indexers] = value -class Dataset( # type: ignore[misc,unused-ignore] +class Dataset( DataWithCoords, DatasetAggregations, DatasetArithmetic, diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 5988797ac1d..d49cb6e13a4 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -4,7 +4,7 @@ import itertools import math import warnings -from collections.abc import Hashable, Iterator, Mapping, Sequence +from collections.abc import Hashable, Iterator, Mapping from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar import numpy as np @@ -475,7 +475,7 @@ def reduce( obj, rolling_dim, keep_attrs=keep_attrs, fill_value=fillna ) - dim: Sequence[Hashable] = list(rolling_dim.values()) + dim = list(rolling_dim.values()) result = windows.reduce(func, dim=dim, 
keep_attrs=keep_attrs, **kwargs)

         # Find valid windows based on count.
@@ -493,7 +493,7 @@ def _counts(self, keep_attrs: bool | None) -> DataArray:
         # array is faster to be reduced than object array.
         # The use of skipna==False is also faster since it does not need to
         # copy the strided array.
-        dim: Sequence[Hashable] = list(rolling_dim.values())
+        dim = list(rolling_dim.values())
         counts = (
             self.obj.notnull(keep_attrs=keep_attrs)
             .rolling(
diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py
index b961abef5db..4b0dde78b04 100644
--- a/xarray/tests/test_groupby.py
+++ b/xarray/tests/test_groupby.py
@@ -728,7 +728,7 @@ def test_groupby_dataset_iter() -> None:
 def test_groupby_dataset_errors() -> None:
     data = create_test_data()
     with pytest.raises(TypeError, match=r"`group` must be"):
-        data.groupby(np.arange(10))  # type: ignore
+        data.groupby(np.arange(10))
     with pytest.raises(ValueError, match=r"length does not match"):
         data.groupby(data["dim1"][:3])
     with pytest.raises(TypeError, match=r"`group` must be"):

From 41bb91477969d1ab043a18abd6ae39c45d5e4dbc Mon Sep 17 00:00:00 2001
From: Michael Niklas
Date: Wed, 13 Sep 2023 21:20:36 +0200
Subject: [PATCH 7/8] add whats-new

---
 doc/whats-new.rst | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index e71c7df49d0..91d64ddb7a3 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -26,7 +26,10 @@ New Features
   different collections of coordinates prior to assign them to a Dataset or
   DataArray (:pull:`8102`) at once.
   By `Benoît Bovy `_.
-- Provide `preferred_chunks` for data read from netcdf files (:issue:`1440`, :pull:`7948`)
+- Provide `preferred_chunks` for data read from netcdf files (:issue:`1440`, :pull:`7948`).
+  By `Martin Raspaud `_.
+- Improved static typing of reduction methods (:pull:`6746`).
+  By `Richard Kleijn `_.
 
 Breaking changes
 ~~~~~~~~~~~~~~~~

From 0277baad6a3816bdc6ecd4c61f935347ac72b118 Mon Sep 17 00:00:00 2001
From: Michael Niklas
Date: Wed, 13 Sep 2023 21:22:29 +0200
Subject: [PATCH 8/8] re-add type ignore

---
 xarray/tests/test_groupby.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py
index 4b0dde78b04..e143e2b8e03 100644
--- a/xarray/tests/test_groupby.py
+++ b/xarray/tests/test_groupby.py
@@ -728,7 +728,7 @@ def test_groupby_dataset_iter() -> None:
 def test_groupby_dataset_errors() -> None:
     data = create_test_data()
     with pytest.raises(TypeError, match=r"`group` must be"):
-        data.groupby(np.arange(10))
+        data.groupby(np.arange(10))  # type: ignore[arg-type,unused-ignore]
     with pytest.raises(ValueError, match=r"length does not match"):
         data.groupby(data["dim1"][:3])
     with pytest.raises(TypeError, match=r"`group` must be"):
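
Taken together, the series annotates the generated reduction methods with ``Self`` (imported from ``xarray.core.types`` as of patch 3), then walks back the ``type: ignore`` churn this causes in patches 4 through 8, scoping each remaining suppression to explicit error codes plus ``unused-ignore`` so that ``warn_unused_ignores`` runs stay green whether or not a given mypy configuration still raises the underlying error. The ``Self`` pattern can be illustrated outside xarray with a minimal, self-contained sketch; this is not xarray's actual code — the class names and method bodies below are simplified stand-ins, and the try/except import is an assumption for interpreters older than Python 3.11:

    from __future__ import annotations

    from typing import Any

    try:  # Python >= 3.11 ships Self in the stdlib
        from typing import Self
    except ImportError:  # assumed fallback for older interpreters
        from typing_extensions import Self


    class Aggregations:
        """Mixin whose reductions return the inheriting class's own type."""

        def reduce(self, func: str, **kwargs: Any) -> Self:
            raise NotImplementedError

        def count(self, **kwargs: Any) -> Self:
            # Annotating with Self (rather than a concrete base class) lets
            # mypy infer Dataset().count() as Dataset and a DataArray-style
            # subclass's count() as that subclass, with no per-class overrides.
            return self.reduce("count", **kwargs)


    class Dataset(Aggregations):
        def reduce(self, func: str, **kwargs: Any) -> Self:
            return self  # stand-in for a real reduction


    ds: Dataset = Dataset().count()  # type-checks: count() narrows to Dataset

With the precise annotations in place, suppressions such as the ``# type: ignore[return-value,unused-ignore]`` comments added to ``_cov_corr`` in patch 4 become removable in patch 6, while the test-suite ignore dropped there is re-added in patch 8 with explicit codes (``arg-type,unused-ignore``) because that ``groupby`` call is deliberately ill-typed.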