diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index da4ad32b1f5..4d59fe0531f 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -42,7 +42,7 @@ jobs:
       matrix:
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
         # Bookend python versions
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.11"]
         env: [""]
         include:
           # Minimum python version:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e02b7d0bd08..c2586a12aa2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -18,7 +18,7 @@ repos:
         files: ^xarray/
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.282'
+    rev: 'v0.0.287'
     hooks:
       - id: ruff
         args: ["--fix"]
@@ -35,7 +35,7 @@ repos:
         additional_dependencies: ["black==23.7.0"]
       - id: blackdoc-autoupdate-black
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.4.1
+    rev: v1.5.1
    hooks:
      - id: mypy
        # Copied from setup.cfg
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index 7be7541a79b..58a05aeddce 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -1056,8 +1056,8 @@ def open_mfdataset(
             )
         else:
             raise ValueError(
-                "{} is an invalid option for the keyword argument"
-                " ``combine``".format(combine)
+                f"{combine} is an invalid option for the keyword argument"
+                " ``combine``"
             )
     except ValueError:
         for ds in datasets:
diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py
index a746163c3fd..0b469ae26fc 100644
--- a/xarray/coding/cftime_offsets.py
+++ b/xarray/coding/cftime_offsets.py
@@ -105,7 +105,7 @@ def __init__(self, n: int = 1):
         if not isinstance(n, int):
             raise TypeError(
                 "The provided multiple 'n' must be an integer. "
-                "Instead a value of type {!r} was provided.".format(type(n))
+                f"Instead a value of type {type(n)!r} was provided."
             )
         self.n = n
@@ -353,13 +353,13 @@ def _validate_month(month, default_month):
        raise TypeError(
            "'self.month' must be an integer value between 1 "
            "and 12. Instead, it was set to a value of "
-           "{!r}".format(result_month)
+           f"{result_month!r}"
        )
    elif not (1 <= result_month <= 12):
        raise ValueError(
            "'self.month' must be an integer value between 1 "
            "and 12. Instead, it was set to a value of "
-           "{!r}".format(result_month)
+           f"{result_month!r}"
        )
    return result_month
@@ -771,7 +771,7 @@ def to_cftime_datetime(date_str_or_date, calendar=None):
        raise TypeError(
            "date_str_or_date must be a string or a "
            "subclass of cftime.datetime. Instead got "
-           "{!r}.".format(date_str_or_date)
+           f"{date_str_or_date!r}."
        )
diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py
index 8f3472dce19..a0800db445a 100644
--- a/xarray/coding/cftimeindex.py
+++ b/xarray/coding/cftimeindex.py
@@ -228,12 +228,12 @@ def assert_all_valid_date_type(data):
        if not isinstance(sample, cftime.datetime):
            raise TypeError(
                "CFTimeIndex requires cftime.datetime "
-               "objects. Got object of {}.".format(date_type)
+               f"objects. Got object of {date_type}."
            )
        if not all(isinstance(value, date_type) for value in data):
            raise TypeError(
                "CFTimeIndex requires using datetime "
-               "objects of all the same type. Got\n{}.".format(data)
+               f"objects of all the same type. Got\n{data}."
            )
@@ -553,8 +553,7 @@ def shift(self, n: int | float, freq: str | timedelta):
            return self + n * to_offset(freq)
        else:
            raise TypeError(
-               "'freq' must be of type "
-               "str or datetime.timedelta, got {}.".format(freq)
+               "'freq' must be of type " f"str or datetime.timedelta, got {freq}."
            )

    def __add__(self, other):
@@ -636,10 +635,10 @@ def to_datetimeindex(self, unsafe=False):
        if calendar not in _STANDARD_CALENDARS and not unsafe:
            warnings.warn(
                "Converting a CFTimeIndex with dates from a non-standard "
-               "calendar, {!r}, to a pandas.DatetimeIndex, which uses dates "
+               f"calendar, {calendar!r}, to a pandas.DatetimeIndex, which uses dates "
                "from the standard calendar. This may lead to subtle errors "
                "in operations that depend on the length of time between "
-               "dates.".format(calendar),
+               "dates.",
                RuntimeWarning,
                stacklevel=2,
            )
diff --git a/xarray/coding/strings.py b/xarray/coding/strings.py
index d0bfb1a7a63..d10af65c44a 100644
--- a/xarray/coding/strings.py
+++ b/xarray/coding/strings.py
@@ -59,9 +59,9 @@ def encode(self, variable, name=None):
        if contains_unicode and (encode_as_char or not self.allows_unicode):
            if "_FillValue" in attrs:
                raise NotImplementedError(
-                   "variable {!r} has a _FillValue specified, but "
+                   f"variable {name!r} has a _FillValue specified, but "
                    "_FillValue is not yet supported on unicode strings: "
-                   "https://github.com/pydata/xarray/issues/1647".format(name)
+                   "https://github.com/pydata/xarray/issues/1647"
                )

            string_encoding = encoding.pop("_Encoding", "utf-8")
@@ -176,7 +176,7 @@ def char_to_bytes(arr):
        if len(arr.chunks[-1]) > 1:
            raise ValueError(
                "cannot stacked dask character array with "
-               "multiple chunks in the last dimension: {}".format(arr)
+               f"multiple chunks in the last dimension: {arr}"
            )

        dtype = np.dtype("S" + str(arr.shape[-1]))
diff --git a/xarray/coding/times.py b/xarray/coding/times.py
index cb6a34c4ddf..7046dc30443 100644
--- a/xarray/coding/times.py
+++ b/xarray/coding/times.py
@@ -233,8 +233,8 @@ def _decode_datetime_with_pandas(
 ) -> np.ndarray:
    if not _is_standard_calendar(calendar):
        raise OutOfBoundsDatetime(
-           "Cannot decode times from a non-standard calendar, {!r}, using "
-           "pandas.".format(calendar)
+           f"Cannot decode times from a non-standard calendar, {calendar!r}, using "
+           "pandas."
        )

    delta, ref_date = _unpack_netcdf_time_units(units)
@@ -470,8 +470,8 @@ def cftime_to_nptime(times, raise_on_invalid: bool = True) -> np.ndarray:
        except ValueError as e:
            if raise_on_invalid:
                raise ValueError(
-                   "Cannot convert date {} to a date in the "
-                   "standard calendar. Reason: {}.".format(t, e)
+                   f"Cannot convert date {t} to a date in the "
+                   f"standard calendar. Reason: {e}."
                )
            else:
                dt = "NaT"
@@ -503,10 +503,8 @@ def convert_times(times, date_type, raise_on_invalid: bool = True) -> np.ndarray:
        except ValueError as e:
            if raise_on_invalid:
                raise ValueError(
-                   "Cannot convert date {} to a date in the "
-                   "{} calendar. Reason: {}.".format(
-                       t, date_type(2000, 1, 1).calendar, e
-                   )
+                   f"Cannot convert date {t} to a date in the "
+                   f"{date_type(2000, 1, 1).calendar} calendar. Reason: {e}."
                )
            else:
                dt = np.NaN
diff --git a/xarray/coding/variables.py b/xarray/coding/variables.py
index 5e655565098..d694c531b15 100644
--- a/xarray/coding/variables.py
+++ b/xarray/coding/variables.py
@@ -179,10 +179,10 @@ def safe_setitem(dest, key: Hashable, value, name: T_Name = None):
    if key in dest:
        var_str = f" on variable {name!r}" if name else ""
        raise ValueError(
-           "failed to prevent overwriting existing key {} in attrs{}. "
+           f"failed to prevent overwriting existing key {key} in attrs{var_str}. "
            "This is probably an encoding field used by xarray to describe "
            "how a variable is serialized. To proceed, remove this key from "
-           "the variable's attributes manually.".format(key, var_str)
+           "the variable's attributes manually."
        )
    dest[key] = value
diff --git a/xarray/conventions.py b/xarray/conventions.py
index 5a6675d60c1..596831e270a 100644
--- a/xarray/conventions.py
+++ b/xarray/conventions.py
@@ -75,20 +75,18 @@ def _infer_dtype(array, name: T_Name = None) -> np.dtype:
            return dtype

    raise ValueError(
-       "unable to infer dtype on variable {!r}; xarray "
-       "cannot serialize arbitrary Python objects".format(name)
+       f"unable to infer dtype on variable {name!r}; xarray "
+       "cannot serialize arbitrary Python objects"
    )


 def ensure_not_multiindex(var: Variable, name: T_Name = None) -> None:
    if isinstance(var, IndexVariable) and isinstance(var.to_index(), pd.MultiIndex):
        raise NotImplementedError(
-           "variable {!r} is a MultiIndex, which cannot yet be "
+           f"variable {name!r} is a MultiIndex, which cannot yet be "
            "serialized to netCDF files. Instead, either use reset_index() "
            "to convert MultiIndex levels into coordinate variables instead "
-           "or use https://cf-xarray.readthedocs.io/en/latest/coding.html.".format(
-               name
-           )
+           "or use https://cf-xarray.readthedocs.io/en/latest/coding.html."
        )
@@ -114,11 +112,11 @@ def ensure_dtype_not_object(var: Variable, name: T_Name = None) -> Variable:
        if is_duck_dask_array(data):
            warnings.warn(
-               "variable {} has data in the form of a dask array with "
+               f"variable {name} has data in the form of a dask array with "
                "dtype=object, which means it is being loaded into memory "
                "to determine a data type that can be safely stored on disk. "
                "To avoid this, coerce this variable to a fixed-size dtype "
-               "with astype() before saving it.".format(name),
+               "with astype() before saving it.",
                SerializationWarning,
            )
            data = data.compute()
@@ -635,9 +633,9 @@ def _encode_coordinates(variables, attributes, non_dim_coord_names):
    for name in list(non_dim_coord_names):
        if isinstance(name, str) and " " in name:
            warnings.warn(
-               "coordinate {!r} has a space in its name, which means it "
+               f"coordinate {name!r} has a space in its name, which means it "
                "cannot be marked as a coordinate on disk and will be "
-               "saved as a data variable instead".format(name),
+               "saved as a data variable instead",
                SerializationWarning,
                stacklevel=6,
            )
diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py
index 39ff878b56d..d2bbc459d83 100644
--- a/xarray/core/alignment.py
+++ b/xarray/core/alignment.py
@@ -839,7 +839,7 @@ def is_alignable(obj):
        elif raise_on_invalid:
            raise ValueError(
                "object to align is neither an xarray.Dataset, "
-               "an xarray.DataArray nor a dictionary: {!r}".format(variables)
+               f"an xarray.DataArray nor a dictionary: {variables!r}"
            )
        else:
            out.append(variables)
diff --git a/xarray/core/arithmetic.py b/xarray/core/arithmetic.py
index 5b2cf38ee2e..5cdbc732741 100644
--- a/xarray/core/arithmetic.py
+++ b/xarray/core/arithmetic.py
@@ -56,10 +56,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        if ufunc.signature is not None:
            raise NotImplementedError(
-               "{} not supported: xarray objects do not directly implement "
+               f"{ufunc} not supported: xarray objects do not directly implement "
                "generalized ufuncs. Instead, use xarray.apply_ufunc or "
                "explicitly convert to xarray objects to NumPy arrays "
-               "(e.g., with `.values`).".format(ufunc)
+               "(e.g., with `.values`)."
            )
        if method != "__call__":
diff --git a/xarray/core/combine.py b/xarray/core/combine.py
index 1599fb60ddc..eecd01d011e 100644
--- a/xarray/core/combine.py
+++ b/xarray/core/combine.py
@@ -109,9 +109,9 @@ def _infer_concat_order_from_coords(datasets):
                    ascending = False
                else:
                    raise ValueError(
-                       "Coordinate variable {} is neither "
+                       f"Coordinate variable {dim} is neither "
                        "monotonically increasing nor "
-                       "monotonically decreasing on all datasets".format(dim)
+                       "monotonically decreasing on all datasets"
                    )

                # Assume that any two datasets whose coord along dim starts
@@ -221,10 +221,8 @@ def _combine_nd(
    n_dims = len(example_tile_id)
    if len(concat_dims) != n_dims:
        raise ValueError(
-           "concat_dims has length {} but the datasets "
-           "passed are nested in a {}-dimensional structure".format(
-               len(concat_dims), n_dims
-           )
+           f"concat_dims has length {len(concat_dims)} but the datasets "
+           f"passed are nested in a {n_dims}-dimensional structure"
        )

    # Each iteration of this loop reduces the length of the tile_ids tuples
@@ -646,7 +644,7 @@ def _combine_single_variable_hypercube(
        if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing):
            raise ValueError(
                "Resulting object does not have monotonic"
-               " global indexes along dimension {}".format(dim)
+               f" global indexes along dimension {dim}"
            )

    return concatenated
diff --git a/xarray/core/computation.py b/xarray/core/computation.py
index 685307fc8c3..fe89672e392 100644
--- a/xarray/core/computation.py
+++ b/xarray/core/computation.py
@@ -1286,7 +1286,7 @@ def cov(
    if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
        raise TypeError(
            "Only xr.DataArray is supported."
-           "Given {}.".format([type(arr) for arr in [da_a, da_b]])
+           f"Given {[type(arr) for arr in [da_a, da_b]]}."
        )

    return _cov_corr(da_a, da_b, dim=dim, ddof=ddof, method="cov")
@@ -1364,7 +1364,7 @@ def corr(da_a: T_DataArray, da_b: T_DataArray, dim: Dims = None) -> T_DataArray:
    if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
        raise TypeError(
            "Only xr.DataArray is supported."
-           "Given {}.".format([type(arr) for arr in [da_a, da_b]])
+           f"Given {[type(arr) for arr in [da_a, da_b]]}."
        )

    return _cov_corr(da_a, da_b, dim=dim, method="corr")
@@ -1707,7 +1707,7 @@ def dot(
    if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
        raise TypeError(
            "Only xr.DataArray and xr.Variable are supported."
-           "Given {}.".format([type(arr) for arr in arrays])
+           f"Given {[type(arr) for arr in arrays]}."
        )

    if len(arrays) == 0:
diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py
index bebf9362532..489b6f0d04e 100644
--- a/xarray/core/coordinates.py
+++ b/xarray/core/coordinates.py
@@ -130,7 +130,7 @@ def to_index(self, ordered_dims: Sequence[Hashable] | None = None) -> pd.Index:
        elif set(ordered_dims) != set(self.dims):
            raise ValueError(
                "ordered_dims must match dims, but does not: "
-               "{} vs {}".format(ordered_dims, self.dims)
+               f"{ordered_dims} vs {self.dims}"
            )

        if len(ordered_dims) == 0:
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index 5a68fc7ffac..dc0b2032a37 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -2234,8 +2234,7 @@ def interp(
        """
        if self.dtype.kind not in "uifc":
            raise TypeError(
-               "interp only works for a numeric type array. "
-               "Given {}.".format(self.dtype)
+               "interp only works for a numeric type array. " f"Given {self.dtype}."
            )
        ds = self._to_temp_dataset().interp(
            coords,
@@ -2362,8 +2361,7 @@ def interp_like(
        """
        if self.dtype.kind not in "uifc":
            raise TypeError(
-               "interp only works for a numeric type array. "
-               "Given {}.".format(self.dtype)
+               "interp only works for a numeric type array. " f"Given {self.dtype}."
            )
        ds = self._to_temp_dataset().interp_like(
            other, method=method, kwargs=kwargs, assume_sorted=assume_sorted
@@ -4325,7 +4323,7 @@ def from_dict(cls: type[T_DataArray], d: Mapping[str, Any]) -> T_DataArray:
        except KeyError as e:
            raise ValueError(
                "cannot convert dict when coords are missing the key "
-               "'{dims_data}'".format(dims_data=str(e.args[0]))
+               f"'{str(e.args[0])}'"
            )
        try:
            data = d["data"]
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index f1a0cb9dc34..97f528aea7d 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -132,6 +132,7 @@
    DatetimeLike,
    DatetimeUnitOptions,
    Dims,
+   DsCompatible,
    ErrorOptions,
    ErrorOptionsWithWarn,
    InterpOptions,
@@ -696,6 +697,11 @@ def __init__(
        self._dims = dims
        self._indexes = indexes

+   # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping
+   # related to https://github.com/python/mypy/issues/9319?
+   def __eq__(self: T_Dataset, other: DsCompatible) -> T_Dataset:  # type: ignore[override]
+       return super().__eq__(other)
+
    @classmethod
    def load_store(cls: type[T_Dataset], store, decoder=None) -> T_Dataset:
        """Create a new dataset from the contents of a backends.*DataStore
@@ -1334,13 +1340,13 @@ def _copy(
            if keys_not_in_vars:
                raise ValueError(
                    "Data must only contain variables in original "
-                   "dataset. Extra variables: {}".format(keys_not_in_vars)
+                   f"dataset. Extra variables: {keys_not_in_vars}"
                )
            keys_missing_from_data = var_keys - data_keys
            if keys_missing_from_data:
                raise ValueError(
                    "Data must contain all variables in original "
-                   "dataset. Data is missing {}".format(keys_missing_from_data)
+                   f"dataset. Data is missing {keys_missing_from_data}"
                )

        indexes, index_vars = self.xindexes.copy_indexes(deep=deep)
@@ -2697,7 +2703,7 @@ def _validate_indexers(
                if v.ndim > 1:
                    raise IndexError(
                        "Unlabeled multi-dimensional array cannot be "
-                       "used for indexing: {}".format(k)
+                       f"used for indexing: {k}"
                    )
                yield k, v
@@ -2737,9 +2743,9 @@ def _get_indexers_coords_and_indexes(self, indexers):
            if v.dtype.kind == "b":
                if v.ndim != 1:  # we only support 1-d boolean array
                    raise ValueError(
-                       "{:d}d-boolean array is used for indexing along "
-                       "dimension {!r}, but only 1d boolean arrays are "
-                       "supported.".format(v.ndim, k)
+                       f"{v.ndim:d}d-boolean array is used for indexing along "
+                       f"dimension {k!r}, but only 1d boolean arrays are "
+                       "supported."
                    )
                # Make sure in case of boolean DataArray, its
                # coordinate also should be indexed.
@@ -3881,7 +3887,7 @@ def _validate_interp_indexer(x, new_x):
                    "coordinate, the coordinates to "
                    "interpolate to must be either datetime "
                    "strings or datetimes. "
-                   "Instead got\n{}".format(new_x)
+                   f"Instead got\n{new_x}"
                )
            return x, new_x
@@ -4531,8 +4537,7 @@ def expand_dims(
                raise ValueError(f"Dimension {d} already exists.")
            if d in self._variables and not utils.is_scalar(self._variables[d]):
                raise ValueError(
-                   "{dim} already exists as coordinate or"
-                   " variable name.".format(dim=d)
+                   f"{d} already exists as coordinate or" " variable name."
                )

        variables: dict[Hashable, Variable] = {}
@@ -4555,8 +4560,7 @@
                pass  # Do nothing if the dimensions value is just an int
            else:
                raise TypeError(
-                   "The value of new dimension {k} must be "
-                   "an iterable or an int".format(k=k)
+                   f"The value of new dimension {k} must be " "an iterable or an int"
                )

        for k, v in self._variables.items():
@@ -5282,7 +5286,7 @@ def to_stacked_array(
            if not dims_include_sample_dims:
                raise ValueError(
                    "All variables in the dataset must contain the "
-                   "dimensions {}.".format(dims)
+                   f"dimensions {dims}."
                )

        def ensure_stackable(val):
@@ -7020,8 +7024,8 @@ def _normalize_dim_order(
            dim_order = list(self.dims)
        elif set(dim_order) != set(self.dims):
            raise ValueError(
-               "dim_order {} does not match the set of dimensions of this "
-               "Dataset: {}".format(dim_order, list(self.dims))
+               f"dim_order {dim_order} does not match the set of dimensions of this "
+               f"Dataset: {list(self.dims)}"
            )

        ordered_dims = {k: self.dims[k] for k in dim_order}
@@ -7452,8 +7456,7 @@ def from_dict(cls: type[T_Dataset], d: Mapping[Any, Any]) -> T_Dataset:
            }
        except KeyError as e:
            raise ValueError(
-               "cannot convert dict without the key "
-               "'{dims_data}'".format(dims_data=str(e.args[0]))
+               "cannot convert dict without the key " f"'{str(e.args[0])}'"
            )
        obj = cls(variable_dict)
@@ -8169,8 +8172,8 @@ def differentiate(
        coord_var = self[coord].variable
        if coord_var.ndim != 1:
            raise ValueError(
-               "Coordinate {} must be 1 dimensional but is {}"
-               " dimensional".format(coord, coord_var.ndim)
+               f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}"
+               " dimensional"
            )

        dim = coord_var.dims[0]
@@ -8271,8 +8274,8 @@ def _integrate_one(self, coord, datetime_unit=None, cumulative=False):
        coord_var = self[coord].variable
        if coord_var.ndim != 1:
            raise ValueError(
-               "Coordinate {} must be 1 dimensional but is {}"
-               " dimensional".format(coord, coord_var.ndim)
+               f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}"
+               " dimensional"
            )

        dim = coord_var.dims[0]
diff --git a/xarray/core/formatting_html.py b/xarray/core/formatting_html.py
index 60bb901c31a..d949cbdfbd1 100644
--- a/xarray/core/formatting_html.py
+++ b/xarray/core/formatting_html.py
@@ -65,10 +65,10 @@ def summarize_attrs(attrs):
 def _icon(icon_name):
    # icon_name should be defined in xarray/static/html/icon-svg-inline.html
    return (
-       "<svg class='icon xr-{0}'>"
-       "<use xlink:href='#{0}'>"
+       f"<svg class='icon xr-{icon_name}'>"
+       f"<use xlink:href='#{icon_name}'>"
        "</use>"
-       "</svg>".format(icon_name)
+       "</svg>"
    )
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index acab9ccc60b..7969ded3102 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -1303,7 +1303,7 @@ def __init__(self, array):
        if not isinstance(array, np.ndarray):
            raise TypeError(
                "NumpyIndexingAdapter only wraps np.ndarray. "
-               "Trying to wrap {}".format(type(array))
+               f"Trying to wrap {type(array)}"
            )
        self.array = array
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index c89545c43ae..05f9930aacd 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -759,18 +759,18 @@ def _validate_indexers(self, key):
                if k.ndim > 1:
                    raise IndexError(
                        "Unlabeled multi-dimensional array cannot be "
-                       "used for indexing: {}".format(k)
+                       f"used for indexing: {k}"
                    )
                if k.dtype.kind == "b":
                    if self.shape[self.get_axis_num(dim)] != len(k):
                        raise IndexError(
-                           "Boolean array size {:d} is used to index array "
-                           "with shape {:s}.".format(len(k), str(self.shape))
+                           f"Boolean array size {len(k):d} is used to index array "
+                           f"with shape {str(self.shape):s}."
                        )
                    if k.ndim > 1:
                        raise IndexError(
-                           "{}-dimensional boolean indexing is "
-                           "not supported. ".format(k.ndim)
+                           f"{k.ndim}-dimensional boolean indexing is "
+                           "not supported. "
                        )
                    if is_duck_dask_array(k.data):
                        raise KeyError(
@@ -783,9 +783,7 @@ def _validate_indexers(self, key):
                        raise IndexError(
                            "Boolean indexer should be unlabeled or on the "
                            "same dimension to the indexed array. Indexer is "
-                           "on {:s} but the target dimension is {:s}.".format(
-                               str(k.dims), dim
-                           )
+                           f"on {str(k.dims):s} but the target dimension is {dim:s}."
                        )

    def _broadcast_indexes_outer(self, key):
@@ -2550,8 +2548,8 @@ def coarsen_reshape(self, windows, boundary, side):
                    variable = variable.pad(pad_width, mode="constant")
                else:
                    raise TypeError(
-                       "{} is invalid for boundary. Valid option is 'exact', "
-                       "'trim' and 'pad'".format(boundary[d])
+                       f"{boundary[d]} is invalid for boundary. Valid option is 'exact', "
+                       "'trim' and 'pad'"
                    )

            shape = []
diff --git a/xarray/plot/dataarray_plot.py b/xarray/plot/dataarray_plot.py
index d2c0a8e2af6..3f7b1568e64 100644
--- a/xarray/plot/dataarray_plot.py
+++ b/xarray/plot/dataarray_plot.py
@@ -486,8 +486,8 @@ def line(
        if ndims > 2:
            raise ValueError(
                "Line plots are for 1- or 2-dimensional DataArrays. "
-               "Passed DataArray has {ndims} "
-               "dimensions".format(ndims=ndims)
+               f"Passed DataArray has {ndims} "
+               "dimensions"
            )

        # The allargs dict passed to _easy_facetgrid above contains args
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index 2c58fe83cef..70e8bd3fdb9 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -1131,7 +1131,7 @@ def _get_color_and_size(value):
            # Labels are not numerical so modifying label_values is not
            # possible, instead filter the array with nicely distributed
            # indexes:
-           if type(num) == int:
+           if type(num) == int:  # noqa: E721
                loc = mpl.ticker.LinearLocator(num)
            else:
                raise ValueError("`num` only supports integers for non-numeric labels.")
diff --git a/xarray/testing.py b/xarray/testing.py
index 6a8bb04f170..0837b562668 100644
--- a/xarray/testing.py
+++ b/xarray/testing.py
@@ -364,7 +364,7 @@ def _assert_dataset_invariants(ds: Dataset, check_default_indexes: bool):
        set(ds._variables),
    )

-   assert type(ds._dims) is dict, ds._dims
+   assert type(ds._dims) is dict, ds._dims  # noqa: E721
    assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims
    var_dims: set[Hashable] = set()
    for v in ds._variables.values():
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index e119cfe9bc6..d0e9f01bdae 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -680,7 +680,7 @@ def test_properties(self) -> None:
        # change them inadvertently:
        assert isinstance(ds.dims, utils.Frozen)
        assert isinstance(ds.dims.mapping, dict)
-       assert type(ds.dims.mapping) is dict
+       assert type(ds.dims.mapping) is dict  # noqa: E721
        assert ds.dims == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20}
        assert ds.sizes == ds.dims
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index f30cdcf3f73..118d78d2e04 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -1091,7 +1091,7 @@ def test_data_and_values(self):
    def test_numpy_same_methods(self):
        v = Variable([], np.float32(0.0))
        assert v.item() == 0
-       assert type(v.item()) is float
+       assert type(v.item()) is float  # noqa: E721

        v = IndexVariable("x", np.arange(5))
        assert 2 == v.searchsorted(2)
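
Note (illustration only, not part of the patch): the bulk of this diff mechanically rewrites str.format calls as f-strings, and the sketch below, using made-up values, shows the conversion rules it relies on: a bare {} becomes {expr}, {!r} keeps its repr() conversion, and format specs such as {:d} and {:s} carry over unchanged. It also shows why the type(x) == int comparisons above now need # noqa: E721: pycodestyle flags == between types because isinstance() is usually intended, but an exact-type check is sometimes deliberate, since isinstance(True, int) is True while type(True) == int is False.

    # Hypothetical values, for demonstration only.
    n, shape, calendar = 2, (4, 5), "noleap"

    # Before: "Boolean array size {:d} is used to index array with shape {:s}.".format(n, str(shape))
    msg = f"Boolean array size {n:d} is used to index array with shape {str(shape):s}."

    # Before: "calendar, {!r}, to a pandas.DatetimeIndex".format(calendar)
    warning = f"calendar, {calendar!r}, to a pandas.DatetimeIndex"

    # Exact-type check: deliberately rejects bool, which is a subclass of int.
    num = 5
    if type(num) == int:  # noqa: E721
        print(msg, warning)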