Replace flake8/isort with ruff #566

Merged · 1 commit · Jan 31, 2023
19 changes: 4 additions & 15 deletions .pre-commit-config.yaml
@@ -11,12 +11,11 @@ repos:
       - id: debug-statements
       - id: mixed-line-ending
 
-  - repo: https://github.com/asottile/pyupgrade
-    rev: v3.3.1
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    rev: 'v0.0.238'
     hooks:
-      - id: pyupgrade
-        args:
-          - '--py37-plus'
+      - id: ruff
+        args: ['--fix']
 
   - repo: https://github.com/psf/black
     rev: 22.12.0
@@ -28,16 +27,6 @@ repos:
     hooks:
      - id: blackdoc
 
-  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
-    hooks:
-      - id: flake8
-
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.11.4
-    hooks:
-      - id: isort
-
   - repo: https://github.com/pre-commit/mirrors-prettier
     rev: v3.0.0-alpha.4
     hooks:
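
Ruff consolidates the hooks this PR removes: it reimplements flake8's pyflakes/pycodestyle checks (the F and E rules), isort's import sorting (I), and pyupgrade's syntax modernization (UP), and its `--fix` flag applies the rewrites in a single pass. The rule selection itself lives in the project's ruff configuration file, which is not visible in this capture. A hypothetical before/after sketch of what the hook does, assuming the I, UP, and F rule sets are enabled (invented module, not code from this repository):

    # Before: imports unsorted, pre-PEP-585 annotations.
    import typing
    import collections


    def tally(words: typing.List[str]) -> typing.Dict[str, int]:
        return dict(collections.Counter(words))

    # After `ruff --fix`: imports sorted (I001), annotations modernized
    # (UP006), and the now-unused `typing` import removed (F401).
    import collections


    def tally(words: list[str]) -> dict[str, int]:
        return dict(collections.Counter(words))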
6 changes: 3 additions & 3 deletions intake_esm/_search.py
@@ -30,7 +30,7 @@ def is_pattern(value):
 
 
 def search(
-    *, df: pd.DataFrame, query: typing.Dict[str, typing.Any], columns_with_iterables: set
+    *, df: pd.DataFrame, query: dict[str, typing.Any], columns_with_iterables: set
 ) -> pd.DataFrame:
     """Search for entries in the catalog."""
 
@@ -59,8 +59,8 @@ def search(
 def search_apply_require_all_on(
     *,
     df: pd.DataFrame,
-    query: typing.Dict[str, typing.Any],
-    require_all_on: typing.Union[str, typing.List[typing.Any]],
+    query: dict[str, typing.Any],
+    require_all_on: typing.Union[str, list[typing.Any]],
     columns_with_iterables: set = None,
 ) -> pd.DataFrame:
     _query = query.copy()
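
The annotation changes throughout this PR follow PEP 585: since Python 3.9 the built-in container types are directly subscriptable, making the `typing.Dict`/`typing.List`/`typing.Set`/`typing.Tuple` aliases redundant (they are deprecated as of 3.9). `typing.Union` and `typing.Optional` are kept, because the `X | Y` spelling only arrived with PEP 604 in Python 3.10. A minimal illustration of the equivalence (not taken from this PR):

    import typing

    # Interchangeable annotations; the lower-case forms need Python 3.9+
    # when evaluated at runtime.
    legacy: typing.Dict[str, typing.List[int]] = {'a': [1, 2]}
    modern: dict[str, list[int]] = {'a': [1, 2]}

    # Union survives the migration: `str | list[str]` requires Python 3.10+.
    value: typing.Union[str, list[str]] = 'tas'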
36 changes: 17 additions & 19 deletions intake_esm/cat.py
@@ -86,7 +86,7 @@ def _validate_data_format(cls, values):
 class Aggregation(pydantic.BaseModel):
     type: AggregationType
     attribute_name: pydantic.StrictStr
-    options: typing.Optional[typing.Dict] = {}
+    options: typing.Optional[dict] = {}
 
     class Config:
         validate_all = True
@@ -95,8 +95,8 @@ class Config:
 
 class AggregationControl(pydantic.BaseModel):
     variable_column_name: pydantic.StrictStr
-    groupby_attrs: typing.List[pydantic.StrictStr]
-    aggregations: typing.List[Aggregation] = []
+    groupby_attrs: list[pydantic.StrictStr]
+    aggregations: list[Aggregation] = []
 
     class Config:
         validate_all = True
@@ -109,11 +109,11 @@ class ESMCatalogModel(pydantic.BaseModel):
     """
 
     esmcat_version: pydantic.StrictStr
-    attributes: typing.List[Attribute]
+    attributes: list[Attribute]
     assets: Assets
     aggregation_control: typing.Optional[AggregationControl] = None
     id: typing.Optional[str] = ''
-    catalog_dict: typing.Optional[typing.List[typing.Dict]] = None
+    catalog_dict: typing.Optional[list[dict]] = None
     catalog_file: pydantic.StrictStr = None
     description: pydantic.StrictStr = None
     title: pydantic.StrictStr = None
@@ -135,7 +135,7 @@ def validate_catalog(cls, values):
         return values
 
     @classmethod
-    def from_dict(cls, data: typing.Dict) -> 'ESMCatalogModel':
+    def from_dict(cls, data: dict) -> 'ESMCatalogModel':
         esmcat = data['esmcat']
         df = data['df']
         if 'last_updated' not in esmcat:
@@ -152,7 +152,7 @@ def save(
         catalog_type: str = 'dict',
         to_csv_kwargs: dict = None,
         json_dump_kwargs: dict = None,
-        storage_options: typing.Dict[str, typing.Any] = None,
+        storage_options: dict[str, typing.Any] = None,
     ) -> None:
         """
         Save the catalog to a file.
@@ -227,8 +227,8 @@ def save(
     def load(
         cls,
         json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
-        storage_options: typing.Dict[str, typing.Any] = None,
-        read_csv_kwargs: typing.Dict[str, typing.Any] = None,
+        storage_options: dict[str, typing.Any] = None,
+        read_csv_kwargs: dict[str, typing.Any] = None,
     ) -> 'ESMCatalogModel':
         """
         Loads the catalog from a file
@@ -273,7 +273,7 @@ def load(
         return cat
 
     @property
-    def columns_with_iterables(self) -> typing.Set[str]:
+    def columns_with_iterables(self) -> set[str]:
         """Return a set of columns that have iterables."""
         if self._df.empty:
             return set()
@@ -325,9 +325,7 @@ def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
             return self.df.groupby(self.aggregation_control.groupby_attrs)
         return self.df
 
-    def _construct_group_keys(
-        self, sep: str = '.'
-    ) -> typing.Dict[str, typing.Union[str, typing.Tuple[str]]]:
+    def _construct_group_keys(self, sep: str = '.') -> dict[str, typing.Union[str, tuple[str]]]:
         grouped = self.grouped
         if isinstance(grouped, pd.core.groupby.generic.DataFrameGroupBy):
             internal_keys = grouped.groups.keys()
@@ -346,7 +344,7 @@ def _construct_group_keys(
 
         return dict(zip(public_keys, internal_keys))
 
-    def _unique(self) -> typing.Dict:
+    def _unique(self) -> dict:
         def _find_unique(series):
             values = series.dropna()
             if series.name in self.columns_with_iterables:
@@ -370,8 +368,8 @@ def nunique(self) -> pd.Series:
     def search(
         self,
         *,
-        query: typing.Union['QueryModel', typing.Dict[str, typing.Any]],
-        require_all_on: typing.Union[str, typing.List[str]] = None,
+        query: typing.Union['QueryModel', dict[str, typing.Any]],
+        require_all_on: typing.Union[str, list[str]] = None,
     ) -> 'ESMCatalogModel':
         """
         Search for entries in the catalog.
@@ -417,9 +415,9 @@ def search(
 class QueryModel(pydantic.BaseModel):
     """A Pydantic model to represent a query to be executed against a catalog."""
 
-    query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]]
-    columns: typing.List[str]
-    require_all_on: typing.Union[str, typing.List[typing.Any]] = None
+    query: dict[pydantic.StrictStr, typing.Union[typing.Any, list[typing.Any]]]
+    columns: list[str]
+    require_all_on: typing.Union[str, list[typing.Any]] = None
 
     class Config:
         validate_all = True
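
One subtlety worth noting (an observation, not something stated in the PR): pydantic evaluates these annotations at runtime rather than treating them as static hints, so subscripted built-ins such as `list[Aggregation]` require the interpreter itself to be Python 3.9+, not merely a PEP-585-aware type checker. A cut-down, hypothetical model in the spirit of `QueryModel` above:

    import typing

    import pydantic


    class QuerySketch(pydantic.BaseModel):
        # pydantic resolves these annotations when the class is created,
        # so dict[...]/list[...] need PEP 585 support at runtime.
        query: dict[str, typing.Union[typing.Any, list[typing.Any]]]
        columns: list[str]


    q = QuerySketch(query={'variable': ['tas']}, columns=['variable'])
    print(q.columns)  # ['variable']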
38 changes: 18 additions & 20 deletions intake_esm/core.py
@@ -71,14 +71,14 @@ class esm_datastore(Catalog):
 
     def __init__(
         self,
-        obj: typing.Union[pydantic.FilePath, pydantic.AnyUrl, typing.Dict[str, typing.Any]],
+        obj: typing.Union[pydantic.FilePath, pydantic.AnyUrl, dict[str, typing.Any]],
         *,
         progressbar: bool = True,
         sep: str = '.',
         registry: typing.Optional[DerivedVariableRegistry] = None,
-        read_csv_kwargs: typing.Dict[str, typing.Any] = None,
-        storage_options: typing.Dict[str, typing.Any] = None,
-        **intake_kwargs: typing.Dict[str, typing.Any],
+        read_csv_kwargs: dict[str, typing.Any] = None,
+        storage_options: dict[str, typing.Any] = None,
+        **intake_kwargs: dict[str, typing.Any],
     ):
 
         """Intake Catalog representing an ESM Collection."""
@@ -113,7 +113,7 @@ def _validate_derivedcat(self) -> None:
                     f'Derived variable {key} depends on unknown column {col} in query: {entry.query}. Valid ESM catalog columns: {self.esmcat.df.columns.tolist()}.'
                 )
 
-    def keys(self) -> typing.List[str]:
+    def keys(self) -> list[str]:
         """
         Get keys for the catalog entries
 
@@ -182,7 +182,7 @@ def df(self) -> pd.DataFrame:
     def __len__(self) -> int:
         return len(self.keys())
 
-    def _get_entries(self) -> typing.Dict[str, ESMDataSource]:
+    def _get_entries(self) -> dict[str, ESMDataSource]:
         # Due to just-in-time entry creation, we may not have all entries loaded
         # We need to make sure to create entries missing from self._entries
         missing = set(self.keys()) - set(self._entries.keys())
@@ -283,7 +283,7 @@ def _ipython_display_(self):
         contents = self._repr_html_()
         display(HTML(contents))
 
-    def __dir__(self) -> typing.List[str]:
+    def __dir__(self) -> list[str]:
         rv = [
             'df',
             'to_dataset_dict',
@@ -304,9 +304,7 @@ def _ipython_key_completions_(self):
         return self.__dir__()
 
     @pydantic.validate_arguments
-    def search(
-        self, require_all_on: typing.Union[str, typing.List[str]] = None, **query: typing.Any
-    ):
+    def search(self, require_all_on: typing.Union[str, list[str]] = None, **query: typing.Any):
         """Search for entries in the catalog.
 
         Parameters
@@ -419,9 +417,9 @@ def serialize(
         name: pydantic.StrictStr,
         directory: typing.Union[pydantic.DirectoryPath, pydantic.StrictStr] = None,
         catalog_type: str = 'dict',
-        to_csv_kwargs: typing.Dict[typing.Any, typing.Any] = None,
-        json_dump_kwargs: typing.Dict[typing.Any, typing.Any] = None,
-        storage_options: typing.Dict[str, typing.Any] = None,
+        to_csv_kwargs: dict[typing.Any, typing.Any] = None,
+        json_dump_kwargs: dict[typing.Any, typing.Any] = None,
+        storage_options: dict[str, typing.Any] = None,
     ) -> None:
         """Serialize catalog to corresponding json and csv files.
 
@@ -508,15 +506,15 @@ def unique(self) -> pd.Series:
     @pydantic.validate_arguments
     def to_dataset_dict(
         self,
-        xarray_open_kwargs: typing.Dict[str, typing.Any] = None,
-        xarray_combine_by_coords_kwargs: typing.Dict[str, typing.Any] = None,
+        xarray_open_kwargs: dict[str, typing.Any] = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] = None,
         preprocess: typing.Callable = None,
-        storage_options: typing.Dict[pydantic.StrictStr, typing.Any] = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] = None,
         progressbar: pydantic.StrictBool = None,
         aggregate: pydantic.StrictBool = None,
         skip_on_error: pydantic.StrictBool = False,
         **kwargs,
-    ) -> typing.Dict[str, xr.Dataset]:
+    ) -> dict[str, xr.Dataset]:
         """
         Load catalog entries into a dictionary of xarray datasets.
 
@@ -654,10 +652,10 @@ def to_dataset_dict(
     @pydantic.validate_arguments
     def to_datatree(
         self,
-        xarray_open_kwargs: typing.Dict[str, typing.Any] = None,
-        xarray_combine_by_coords_kwargs: typing.Dict[str, typing.Any] = None,
+        xarray_open_kwargs: dict[str, typing.Any] = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] = None,
         preprocess: typing.Callable = None,
-        storage_options: typing.Dict[pydantic.StrictStr, typing.Any] = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] = None,
         progressbar: pydantic.StrictBool = None,
         aggregate: pydantic.StrictBool = None,
         skip_on_error: pydantic.StrictBool = False,
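
The same applies to methods decorated with `@pydantic.validate_arguments`, which validate arguments against the annotations at call time. A hypothetical standalone function (`open_assets` is invented for illustration, not part of intake-esm) showing the behavior:

    import typing

    import pydantic


    @pydantic.validate_arguments
    def open_assets(
        paths: list[str],
        storage_options: typing.Optional[dict[str, typing.Any]] = None,
    ) -> dict[str, typing.Any]:
        # pydantic checks (and where possible coerces) the arguments
        # before the function body runs.
        return {'paths': paths, 'storage_options': storage_options or {}}


    print(open_assets(['a.nc', 'b.nc']))
    # open_assets('a.nc') would raise pydantic.ValidationError (not a valid list).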
18 changes: 9 additions & 9 deletions intake_esm/derived.py
@@ -14,7 +14,7 @@ class DerivedVariableError(Exception):
 class DerivedVariable(pydantic.BaseModel):
     func: typing.Callable
     variable: pydantic.StrictStr
-    query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]]
+    query: dict[pydantic.StrictStr, typing.Union[typing.Any, list[typing.Any]]]
     prefer_derived: bool
 
     @pydantic.validator('query')
@@ -25,7 +25,7 @@ def validate_query(cls, values):
             _query[key] = [value]
         return _query
 
-    def dependent_variables(self, variable_key_name: str) -> typing.List[pydantic.StrictStr]:
+    def dependent_variables(self, variable_key_name: str) -> list[pydantic.StrictStr]:
         """Return a list of dependent variables for a given variable"""
         return self.query[variable_key_name]
 
@@ -92,7 +92,7 @@ def register(
         func: typing.Callable,
         *,
         variable: str,
-        query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]],
+        query: dict[pydantic.StrictStr, typing.Union[typing.Any, list[typing.Any]]],
         prefer_derived: bool = False,
     ) -> typing.Callable:
         """Register a derived variable
@@ -134,16 +134,16 @@ def __repr__(self) -> str:
     def __len__(self) -> int:
         return len(self._registry)
 
-    def items(self) -> typing.List[typing.Tuple[str, DerivedVariable]]:
+    def items(self) -> list[tuple[str, DerivedVariable]]:
         return list(self._registry.items())
 
-    def keys(self) -> typing.List[str]:
+    def keys(self) -> list[str]:
         return list(self._registry.keys())
 
-    def values(self) -> typing.List[DerivedVariable]:
+    def values(self) -> list[DerivedVariable]:
         return list(self._registry.values())
 
-    def search(self, variable: typing.Union[str, typing.List[str]]) -> 'DerivedVariableRegistry':
+    def search(self, variable: typing.Union[str, list[str]]) -> 'DerivedVariableRegistry':
         """Search for a derived variable by name or list of names
 
         Parameters
@@ -166,10 +166,10 @@ def search(self, variable: typing.Union[str, typing.List[str]]) -> 'DerivedVariableRegistry':
     def update_datasets(
         self,
         *,
-        datasets: typing.Dict[str, xr.Dataset],
+        datasets: dict[str, xr.Dataset],
         variable_key_name: str,
         skip_on_error: bool = False,
-    ) -> typing.Dict[str, xr.Dataset]:
+    ) -> dict[str, xr.Dataset]:
         """Given a dictionary of datasets, return a dictionary of datasets with the derived variables
 
         Parameters
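
For orientation, the `register` signature in this hunk takes the function positionally with keyword-only `variable` and `query`. A hypothetical registration of a derived variable (names invented; assumes `DerivedVariableRegistry` is importable from the package root, as the intake-esm docs show):

    import xarray as xr

    from intake_esm import DerivedVariableRegistry

    registry = DerivedVariableRegistry()


    def calc_wind_speed(ds: xr.Dataset) -> xr.Dataset:
        # Derive wind speed from the u/v components requested via `query`.
        ds['wind_speed'] = (ds['u'] ** 2 + ds['v'] ** 2) ** 0.5
        return ds


    # func is positional; variable/query are keyword-only, matching the diff.
    registry.register(
        calc_wind_speed, variable='wind_speed', query={'variable': ['u', 'v']}
    )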
14 changes: 7 additions & 7 deletions intake_esm/source.py
@@ -125,19 +125,19 @@ class ESMDataSource(DataSource):
     def __init__(
         self,
         key: pydantic.StrictStr,
-        records: typing.List[typing.Dict[str, typing.Any]],
+        records: list[dict[str, typing.Any]],
         variable_column_name: pydantic.StrictStr,
         path_column_name: pydantic.StrictStr,
         data_format: typing.Optional[DataFormat],
         format_column_name: typing.Optional[pydantic.StrictStr],
         *,
-        aggregations: typing.Optional[typing.List[Aggregation]] = None,
-        requested_variables: typing.List[str] = None,
+        aggregations: typing.Optional[list[Aggregation]] = None,
+        requested_variables: list[str] = None,
         preprocess: typing.Callable = None,
-        storage_options: typing.Dict[str, typing.Any] = None,
-        xarray_open_kwargs: typing.Dict[str, typing.Any] = None,
-        xarray_combine_by_coords_kwargs: typing.Dict[str, typing.Any] = None,
-        intake_kwargs: typing.Dict[str, typing.Any] = None,
+        storage_options: dict[str, typing.Any] = None,
+        xarray_open_kwargs: dict[str, typing.Any] = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] = None,
+        intake_kwargs: dict[str, typing.Any] = None,
     ):
         """An intake compatible Data Source for ESM data.
 
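
With these changes in place, `pre-commit run --all-files` exercises the full suite locally, and a single ruff hook now covers what previously required three separate tools (flake8, isort, and pyupgrade).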