diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 97d91111e833a9..4bd56ccb1b5cec 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -17,7 +17,7 @@ try: import pandas._testing as tm except ImportError: - import pandas.util.testing as tm # noqa:F401 + import pandas.util.testing as tm # noqa: F401 numeric_dtypes = [ diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index dcbae66906a298..01bf999a1f99f5 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -96,7 +96,7 @@ be calculated with :meth:`~Rolling.apply` by specifying a separate column of wei return arr df = pd.DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]]) - df.rolling(2, method="table", min_periods=0).apply(weighted_mean, raw=True, engine="numba") # noqa:E501 + df.rolling(2, method="table", min_periods=0).apply(weighted_mean, raw=True, engine="numba") # noqa: E501 .. versionadded:: 1.3 diff --git a/pandas/__init__.py b/pandas/__init__.py index 6ddfbadcf91d19..cb00f9ed12647b 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -21,7 +21,7 @@ try: # numpy compat from pandas.compat import ( - is_numpy_dev as _is_numpy_dev, # pyright: ignore # noqa:F401 + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401,E501 ) except ImportError as _err: # pragma: no cover _module = _err.name @@ -41,7 +41,7 @@ ) # let init-time option registration happen -import pandas.core.config_init # pyright: ignore # noqa:F401 +import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401 from pandas.core.api import ( # dtype diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py index 73ed99c3a46405..c37ad563df8ef3 100644 --- a/pandas/_config/__init__.py +++ b/pandas/_config/__init__.py @@ -17,7 +17,7 @@ "using_copy_on_write", ] from pandas._config import config -from pandas._config import dates # pyright: ignore # noqa:F401 +from pandas._config import dates # pyright: ignore[reportUnusedImport] # noqa: F401 from pandas._config.config import ( _global_config, describe_option, diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py index 2c532cda480f07..29ed375134a2b6 100644 --- a/pandas/_libs/__init__.py +++ b/pandas/_libs/__init__.py @@ -13,8 +13,8 @@ # Below imports needs to happen first to ensure pandas top level # module gets monkeypatched with the pandas_datetime_CAPI # see pandas_datetime_exec in pd_datetime.c -import pandas._libs.pandas_parser # noqa # isort: skip # type: ignore[reportUnusedImport] -import pandas._libs.pandas_datetime # noqa # isort: skip # type: ignore[reportUnusedImport] +import pandas._libs.pandas_parser # noqa: F401,E501 # isort: skip # type: ignore[reportUnusedImport] +import pandas._libs.pandas_datetime # noqa: F401,E501 # isort: skip # type: ignore[reportUnusedImport] from pandas._libs.interval import Interval from pandas._libs.tslibs import ( NaT, diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 584486799e3edf..ae047939c6526a 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -30,7 +30,7 @@ from enum import Enum class _NoDefault(Enum): no_default = ... 
-no_default: Final = _NoDefault.no_default # noqa +no_default: Final = _NoDefault.no_default # noqa: PYI015 NoDefault = Literal[_NoDefault.no_default] i8max: int diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 39dade594a5afc..9ba3067be5a14f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -534,7 +534,7 @@ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" # incompatible with return type "ExtensionDtype" in supertype # "ExtensionArray" - def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] # noqa:E501 + def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] # noqa: E501 """ The dtype for the DatetimeArray. diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 6dabb866b8f5c1..08a5f9c79274b9 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1200,7 +1200,7 @@ def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: Traceback (most recent call last): ... TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' - """ # noqa:E501 + """ # noqa: E501 msg = ( f"The '{dtype.name}' dtype has no unit. " f"Please pass in '{dtype.name}[ns]' instead." diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 093101e2ae5a42..7fff0f0d2d8051 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -902,7 +902,7 @@ class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): # error: Incompatible types in assignment (expression has type # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype" # defined the type as "Dict[str, PandasExtensionDtype]") [assignment] - _cache_dtypes: dict[BaseOffset, PeriodDtype] = {} # type: ignore[assignment] # noqa:E501 + _cache_dtypes: dict[BaseOffset, PeriodDtype] = {} # type: ignore[assignment] # noqa: E501 __hash__ = PeriodDtypeBase.__hash__ _freq: BaseOffset diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 674746e00c84be..56c58bc9347e02 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10431,7 +10431,7 @@ def corr( dogs cats dogs 1.0 NaN cats NaN 1.0 - """ # noqa:E501 + """ # noqa: E501 data = self._get_numeric_data() if numeric_only else self cols = data.columns idx = cols.copy() @@ -10676,7 +10676,7 @@ def corrwith( d 1.0 e NaN dtype: float64 - """ # noqa:E501 + """ # noqa: E501 axis = self._get_axis_number(axis) this = self._get_numeric_data() if numeric_only else self diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4258d1edf34dcc..79e24ad2d0e4c2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -210,7 +210,7 @@ _shared_doc_kwargs = { "axes": "keywords for axes", "klass": "Series/DataFrame", - "axes_single_arg": "{0 or 'index'} for Series, {0 or 'index', 1 or 'columns'} for DataFrame", # noqa:E501 + "axes_single_arg": "{0 or 'index'} for Series, {0 or 'index', 1 or 'columns'} for DataFrame", # noqa: E501 "inplace": """ inplace : bool, default False If True, performs operation inplace and returns None.""", @@ -2904,7 +2904,7 @@ def to_sql( >>> with engine.connect() as conn: ... 
conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] - """ # noqa:E501 + """ # noqa: E501 from pandas.io import sql return sql.to_sql( @@ -5901,7 +5901,7 @@ def sample( num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 - """ # noqa:E501 + """ # noqa: E501 if axis is None: axis = 0 diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 072627b275a02a..c32c96077bde74 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -4227,7 +4227,7 @@ def sample( 5 black 5 2 blue 2 0 red 0 - """ # noqa:E501 + """ # noqa: E501 if self._selected_obj.empty: # GH48459 prevent ValueError when object is empty return self._selected_obj diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 53ef1bac920852..02f8393eed102e 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1537,7 +1537,7 @@ def delete(self, loc) -> list[Block]: else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" - values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa + values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa: E501 locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py index c8f8a2127083e3..9d597b9f4b4890 100644 --- a/pandas/core/methods/describe.py +++ b/pandas/core/methods/describe.py @@ -193,7 +193,7 @@ def _select_data(self) -> DataFrame: include=self.include, exclude=self.exclude, ) - return data # pyright: ignore + return data # pyright: ignore[reportGeneralTypeIssues] def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: diff --git a/pandas/core/series.py b/pandas/core/series.py index afa124368a29e2..f47b5edc0f2430 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2682,7 +2682,7 @@ def corr( >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 - """ # noqa:E501 + """ # noqa: E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index b9a5c431d83875..57df011590cafe 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -218,7 +218,7 @@ def to_numeric( values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: - values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] # noqa + values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] # noqa: E501 values, set(), coerce_numeric=coerce_numeric, diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 2ea8de22119090..e8670757e16695 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -161,7 +161,7 @@ def __init__(self) -> None: import pyarrow.parquet # import utils to register the pyarrow extension types - import pandas.core.arrays.arrow.extension_types # pyright: ignore # noqa:F401 + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401,E501 self.api = pyarrow @@ -243,7 +243,7 @@ def read( mapping = _arrow_dtype_mapping() to_pandas_kwargs["types_mapper"] = mapping.get elif dtype_backend == "pyarrow": - to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment] # noqa + to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # 
type: ignore[assignment] # noqa: E501 manager = get_option("mode.data_manager") if manager == "array": diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 4c0cffffb423e3..564339cefa3aac 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -713,7 +713,7 @@ def _infer_types( values, na_values, False, - convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 ) except (ValueError, TypeError): # e.g. encountering datetime string gets ValueError @@ -749,7 +749,7 @@ def _infer_types( np.asarray(values), true_values=self.true_values, false_values=self.false_values, - convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 ) if result.dtype == np.bool_ and non_default_dtype_backend: if bool_mask is None: @@ -812,7 +812,7 @@ def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLi if is_bool_dtype(cast_type): # error: Unexpected keyword argument "true_values" for # "_from_sequence_of_strings" of "ExtensionArray" - return array_type._from_sequence_of_strings( # type: ignore[call-arg] # noqa:E501 + return array_type._from_sequence_of_strings( # type: ignore[call-arg] # noqa: E501 values, dtype=cast_type, true_values=self.true_values, diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 7fa1cb2840fae4..6a161febfe3168 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -758,7 +758,7 @@ def to_sql( rows as stipulated in the `sqlite3 `__ or `SQLAlchemy `__ - """ # noqa:E501 + """ # noqa: E501 if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py index ffc44b30a3870c..cdf5d967d9c3dd 100644 --- a/pandas/tests/arrays/categorical/test_repr.py +++ b/pandas/tests/arrays/categorical/test_repr.py @@ -79,7 +79,7 @@ def test_unicode_print(self): expected = """\ ['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] Length: 60 -Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa:E501 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 assert repr(c) == expected @@ -89,7 +89,7 @@ def test_unicode_print(self): c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] Length: 60 -Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa:E501 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 assert repr(c) == expected @@ -214,14 +214,14 @@ def test_categorical_repr_datetime_ordered(self): c = Categorical(idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < - 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501 + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 
2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < - 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501 + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 assert repr(c) == exp @@ -230,7 +230,7 @@ def test_categorical_repr_datetime_ordered(self): exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < - 2011-01-01 13:00:00-05:00]""" # noqa:E501 + 2011-01-01 13:00:00-05:00]""" # noqa: E501 assert repr(c) == exp @@ -238,7 +238,7 @@ def test_categorical_repr_datetime_ordered(self): exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < - 2011-01-01 13:00:00-05:00]""" # noqa:E501 + 2011-01-01 13:00:00-05:00]""" # noqa: E501 assert repr(c) == exp @@ -258,14 +258,14 @@ def test_categorical_repr_period(self): c = Categorical(idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(c) == exp @@ -278,7 +278,7 @@ def test_categorical_repr_period(self): c = Categorical(idx.append(idx), categories=idx) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] -Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa:E501 +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa: E501 assert repr(c) == exp @@ -287,14 +287,14 @@ def test_categorical_repr_period_ordered(self): c = Categorical(idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(c) == exp c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(c) == exp @@ -307,7 +307,7 @@ def test_categorical_repr_period_ordered(self): c = 
Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] -Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa:E501 +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa: E501 assert repr(c) == exp @@ -321,7 +321,7 @@ def test_categorical_repr_timedelta(self): c = Categorical(idx.append(idx), categories=idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] -Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa:E501 +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa: E501 assert repr(c) == exp @@ -331,7 +331,7 @@ def test_categorical_repr_timedelta(self): Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, - 18 days 01:00:00, 19 days 01:00:00]""" # noqa:E501 + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 assert repr(c) == exp @@ -340,7 +340,7 @@ def test_categorical_repr_timedelta(self): Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, - 18 days 01:00:00, 19 days 01:00:00]""" # noqa:E501 + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 assert repr(c) == exp @@ -354,7 +354,7 @@ def test_categorical_repr_timedelta_ordered(self): c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] -Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa:E501 +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa: E501 assert repr(c) == exp @@ -364,7 +364,7 @@ def test_categorical_repr_timedelta_ordered(self): Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < - 18 days 01:00:00 < 19 days 01:00:00]""" # noqa:E501 + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 assert repr(c) == exp @@ -373,26 +373,26 @@ def test_categorical_repr_timedelta_ordered(self): Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 < - 18 days 01:00:00 < 19 days 01:00:00]""" # noqa:E501 + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 assert repr(c) == exp def test_categorical_index_repr(self): idx = CategoricalIndex(Categorical([1, 2, 3])) - exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == exp i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64))) - exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp def test_categorical_index_repr_ordered(self): i = CategoricalIndex(Categorical([1, 2, 3], ordered=True)) - exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64), ordered=True)) - exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp def test_categorical_index_repr_datetime(self): @@ -401,7 +401,7 @@ def test_categorical_index_repr_datetime(self): exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], - categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -410,7 +410,7 @@ def test_categorical_index_repr_datetime(self): exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], - categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -420,7 +420,7 @@ def test_categorical_index_repr_datetime_ordered(self): exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], - categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -429,7 +429,7 @@ def 
test_categorical_index_repr_datetime_ordered(self): exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], - categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -439,7 +439,7 @@ def test_categorical_index_repr_datetime_ordered(self): '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], - categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -447,24 +447,24 @@ def test_categorical_index_repr_period(self): # test all length idx = period_range("2011-01-01 09:00", freq="H", periods=1) i = CategoricalIndex(Categorical(idx)) - exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = period_range("2011-01-01 09:00", freq="H", periods=2) i = CategoricalIndex(Categorical(idx)) - exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = period_range("2011-01-01 09:00", freq="H", periods=3) i = CategoricalIndex(Categorical(idx)) - exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = period_range("2011-01-01 09:00", freq="H", periods=5) i = CategoricalIndex(Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], - categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp @@ -473,13 +473,13 @@ def test_categorical_index_repr_period(self): '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], - categories=[2011-01-01 
09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = period_range("2011-01", freq="M", periods=5) i = CategoricalIndex(Categorical(idx)) - exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp def test_categorical_index_repr_period_ordered(self): @@ -487,19 +487,19 @@ def test_categorical_index_repr_period_ordered(self): i = CategoricalIndex(Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], - categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa:E501 + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = period_range("2011-01", freq="M", periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) - exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp def test_categorical_index_repr_timedelta(self): idx = timedelta_range("1 days", periods=5) i = CategoricalIndex(Categorical(idx)) - exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = timedelta_range("1 hours", periods=10) @@ -508,14 +508,14 @@ def test_categorical_index_repr_timedelta(self): '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], - categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')""" # noqa:E501 + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')""" # noqa: E501 assert repr(i) == exp def test_categorical_index_repr_timedelta_ordered(self): idx = timedelta_range("1 days", periods=5) i = CategoricalIndex(Categorical(idx, ordered=True)) - exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')""" # noqa:E501 + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], 
categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp idx = timedelta_range("1 hours", periods=10) @@ -524,7 +524,7 @@ def test_categorical_index_repr_timedelta_ordered(self): '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], - categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')""" # noqa:E501 + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')""" # noqa: E501 assert repr(i) == exp diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index f3566e040dc85e..856a5b3a22a95d 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -27,6 +27,6 @@ def test_compat(): def test_invalid_numexpr_version(engine, parser): if engine == "numexpr": pytest.importorskip("numexpr") - a, b = 1, 2 # noqa:F841 + a, b = 1, 2 # noqa: F841 res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 93ae2bfdd01e44..35960c707d3bda 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -622,8 +622,8 @@ def test_unary_in_function(self): ), ) def test_disallow_scalar_bool_ops(self, ex, engine, parser): - x, a, b = np.random.randn(3), 1, 2 # noqa:F841 - df = DataFrame(np.random.randn(3, 2)) # noqa:F841 + x, a, b = np.random.randn(3), 1, 2 # noqa: F841 + df = DataFrame(np.random.randn(3, 2)) # noqa: F841 msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" with pytest.raises(NotImplementedError, match=msg): @@ -657,7 +657,7 @@ def test_identical(self, engine, parser): tm.assert_numpy_array_equal(result, np.array([1.5])) assert result.shape == (1,) - x = np.array([False]) # noqa:F841 + x = np.array([False]) # noqa: F841 result = pd.eval("x", engine=engine, parser=parser) tm.assert_numpy_array_equal(result, np.array([False])) assert result.shape == (1,) @@ -1103,7 +1103,7 @@ def test_single_variable(self): tm.assert_frame_equal(df, df2) def test_failing_subscript_with_name_error(self): - df = DataFrame(np.random.randn(5, 3)) # noqa:F841 + df = DataFrame(np.random.randn(5, 3)) # noqa: F841 with pytest.raises(NameError, match="name 'x' is not defined"): self.eval("df[x > 2] > 2") @@ -1172,7 +1172,7 @@ def test_assignment_single_assign_new(self): def test_assignment_single_assign_local_overlap(self): df = DataFrame(np.random.randn(5, 2), columns=list("ab")) df = df.copy() - a = 1 # noqa:F841 + a = 1 # noqa: F841 df.eval("a = 1 + b", inplace=True) expected = df.copy() @@ -1182,7 +1182,7 @@ def test_assignment_single_assign_local_overlap(self): def test_assignment_single_assign_name(self): df = DataFrame(np.random.randn(5, 2), columns=list("ab")) - a = 1 # noqa:F841 + a = 1 # noqa: F841 old_a = df.a.copy() df.eval("a = a + b", inplace=True) result = old_a + df.b @@ -1481,7 +1481,7 @@ def test_simple_in_ops(self, engine, parser): pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser) def test_check_many_exprs(self, engine, parser): - a = 1 # noqa:F841 + a = 1 # noqa: F841 expr = " * ".join("a" * 33) expected = 1 res = pd.eval(expr, engine=engine, 
parser=parser) @@ -1520,7 +1520,7 @@ def test_fails_and_or_not(self, expr, engine, parser): @pytest.mark.parametrize("char", ["|", "&"]) def test_fails_ampersand_pipe(self, char, engine, parser): - df = DataFrame(np.random.randn(5, 3)) # noqa:F841 + df = DataFrame(np.random.randn(5, 3)) # noqa: F841 ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)" if parser == "python": msg = "cannot evaluate scalar only bool ops" @@ -1640,7 +1640,7 @@ def test_no_new_locals(self, engine, parser): assert lcls == lcls2 def test_no_new_globals(self, engine, parser): - x = 1 # noqa:F841 + x = 1 # noqa: F841 gbls = globals().copy() pd.eval("x + 1", engine=engine, parser=parser) gbls2 = globals().copy() @@ -1738,7 +1738,7 @@ def test_name_error_exprs(engine, parser): @pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"]) def test_invalid_local_variable_reference(engine, parser, express): - a, b = 1, 2 # noqa:F841 + a, b = 1, 2 # noqa: F841 if parser != "pandas": with pytest.raises(SyntaxError, match="The '@' prefix is only"): @@ -1782,7 +1782,7 @@ def test_more_than_one_expression_raises(engine, parser): def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): gen = {int: lambda: np.random.randint(10), float: np.random.randn} - mid = gen[lhs]() # noqa:F841 + mid = gen[lhs]() # noqa: F841 lhs = gen[lhs]() rhs = gen[rhs]() diff --git a/pandas/tests/copy_view/index/test_index.py b/pandas/tests/copy_view/index/test_index.py index 817be43475d0b4..5e9c04c0adfc3f 100644 --- a/pandas/tests/copy_view/index/test_index.py +++ b/pandas/tests/copy_view/index/test_index.py @@ -90,7 +90,7 @@ def test_index_from_series(using_copy_on_write): def test_index_from_series_copy(using_copy_on_write): ser = Series([1, 2]) - idx = Index(ser, copy=True) # noqa + idx = Index(ser, copy=True) # noqa: F841 arr = get_array(ser) ser.iloc[0] = 100 assert np.shares_memory(get_array(ser), arr) diff --git a/pandas/tests/copy_view/test_core_functionalities.py b/pandas/tests/copy_view/test_core_functionalities.py index 204e26b35d680f..25af1975523357 100644 --- a/pandas/tests/copy_view/test_core_functionalities.py +++ b/pandas/tests/copy_view/test_core_functionalities.py @@ -47,7 +47,7 @@ def test_setitem_with_view_invalidated_does_not_copy(using_copy_on_write, reques df["b"] = 100 arr = get_array(df, "a") - view = None # noqa + view = None # noqa: F841 df.iloc[0, 0] = 100 if using_copy_on_write: # Setitem split the block. Since the old block shared data with view diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 7e765cc5342d1e..dec41150a451c3 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -41,32 +41,32 @@ class TestMyDtype(BaseDtypeTests): ``assert_series_equal`` on your base test class. 
""" -from pandas.tests.extension.base.accumulate import BaseAccumulateTests # noqa -from pandas.tests.extension.base.casting import BaseCastingTests # noqa -from pandas.tests.extension.base.constructors import BaseConstructorsTests # noqa -from pandas.tests.extension.base.dim2 import ( # noqa +from pandas.tests.extension.base.accumulate import BaseAccumulateTests # noqa: F401 +from pandas.tests.extension.base.casting import BaseCastingTests # noqa: F401 +from pandas.tests.extension.base.constructors import BaseConstructorsTests # noqa: F401 +from pandas.tests.extension.base.dim2 import ( # noqa: F401 Dim2CompatTests, NDArrayBacked2DTests, ) -from pandas.tests.extension.base.dtype import BaseDtypeTests # noqa -from pandas.tests.extension.base.getitem import BaseGetitemTests # noqa -from pandas.tests.extension.base.groupby import BaseGroupbyTests # noqa -from pandas.tests.extension.base.index import BaseIndexTests # noqa -from pandas.tests.extension.base.interface import BaseInterfaceTests # noqa -from pandas.tests.extension.base.io import BaseParsingTests # noqa -from pandas.tests.extension.base.methods import BaseMethodsTests # noqa -from pandas.tests.extension.base.missing import BaseMissingTests # noqa -from pandas.tests.extension.base.ops import ( # noqa +from pandas.tests.extension.base.dtype import BaseDtypeTests # noqa: F401 +from pandas.tests.extension.base.getitem import BaseGetitemTests # noqa: F401 +from pandas.tests.extension.base.groupby import BaseGroupbyTests # noqa: F401 +from pandas.tests.extension.base.index import BaseIndexTests # noqa: F401 +from pandas.tests.extension.base.interface import BaseInterfaceTests # noqa: F401 +from pandas.tests.extension.base.io import BaseParsingTests # noqa: F401 +from pandas.tests.extension.base.methods import BaseMethodsTests # noqa: F401 +from pandas.tests.extension.base.missing import BaseMissingTests # noqa: F401 +from pandas.tests.extension.base.ops import ( # noqa: F401 BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil, BaseUnaryOpsTests, ) -from pandas.tests.extension.base.printing import BasePrintingTests # noqa -from pandas.tests.extension.base.reduce import ( # noqa +from pandas.tests.extension.base.printing import BasePrintingTests # noqa: F401 +from pandas.tests.extension.base.reduce import ( # noqa: F401 BaseBooleanReduceTests, BaseNoReduceTests, BaseNumericReduceTests, ) -from pandas.tests.extension.base.reshaping import BaseReshapingTests # noqa -from pandas.tests.extension.base.setitem import BaseSetitemTests # noqa +from pandas.tests.extension.base.reshaping import BaseReshapingTests # noqa: F401 +from pandas.tests.extension.base.setitem import BaseSetitemTests # noqa: F401 diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index bf0436e9c01eff..9f44e85789cbd7 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -264,8 +264,7 @@ def test_from_records_to_records(self): arr = np.zeros((2,), dtype=("i4,f4,a10")) arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")] - # TODO(wesm): unused - frame = DataFrame.from_records(arr) # noqa + DataFrame.from_records(arr) index = Index(np.arange(len(arr))[::-1]) indexed_frame = DataFrame.from_records(arr, index=index) @@ -366,7 +365,7 @@ def test_from_records_columns_not_modified(self): columns = ["a", "b", "c"] original_columns = list(columns) - df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa + 
DataFrame.from_records(tuples, columns=columns, index="a") assert columns == original_columns diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 05b50c180ced08..224abbcef27df6 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -525,7 +525,7 @@ def test_getitem_fancy_slice_integers_step(self): df = DataFrame(np.random.randn(10, 5)) # this is OK - result = df.iloc[:8:2] # noqa + df.iloc[:8:2] df.iloc[:8:2] = np.nan assert isna(df.iloc[:8:2]).values.all() @@ -629,7 +629,7 @@ def test_setitem_fancy_scalar(self, float_frame): # individual value for j, col in enumerate(f.columns): - ts = f[col] # noqa + f[col] for idx in f.index[::5]: i = f.index.get_loc(idx) val = np.random.randn() diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index cfc42e81a4234b..c5e1e3c02c26e8 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -819,7 +819,7 @@ def test_where_bool_comparison(): df_mask = DataFrame( {"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]} ) - result = df_mask.where(df_mask == False) # noqa:E712 + result = df_mask.where(df_mask == False) # noqa: E712 expected = DataFrame( { "AAA": np.array([np.nan] * 4, dtype=object), diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py index 2cff2c4b2bc573..47cebd31451e3f 100644 --- a/pandas/tests/frame/methods/test_asfreq.py +++ b/pandas/tests/frame/methods/test_asfreq.py @@ -143,11 +143,11 @@ def test_asfreq(self, datetime_frame): tm.assert_frame_equal(offset_monthly, rule_monthly) - filled = rule_monthly.asfreq("B", method="pad") # noqa + rule_monthly.asfreq("B", method="pad") # TODO: actually check that this worked. # don't forget! - filled_dep = rule_monthly.asfreq("B", method="pad") # noqa + rule_monthly.asfreq("B", method="pad") def test_asfreq_datetimeindex(self): df = DataFrame( diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 9fdb600b6efc43..51136309662019 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -113,17 +113,17 @@ def test_astype_with_exclude_string(self, float_frame): def test_astype_with_view_float(self, float_frame): # this is the only real reason to do it this way tf = np.round(float_frame).astype(np.int32) - casted = tf.astype(np.float32, copy=False) + tf.astype(np.float32, copy=False) # TODO(wesm): verification? 
tf = float_frame.astype(np.float64) - casted = tf.astype(np.int64, copy=False) # noqa + tf.astype(np.int64, copy=False) def test_astype_with_view_mixed_float(self, mixed_float_frame): tf = mixed_float_frame.reindex(columns=["A", "B", "C"]) - casted = tf.astype(np.int64) - casted = tf.astype(np.float32) # noqa + tf.astype(np.int64) + tf.astype(np.float32) @pytest.mark.parametrize("dtype", [np.int32, np.int64]) @pytest.mark.parametrize("val", [np.nan, np.inf]) diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index 027e12392206b0..ded2c7702f6f5d 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -603,10 +603,7 @@ def test_fill_corner(self, float_frame, float_string_frame): assert (filled.loc[filled.index[5:20], "foo"] == 0).all() del float_string_frame["foo"] - empty_float = float_frame.reindex(columns=[]) - - # TODO(wesm): unused? - result = empty_float.fillna(value=0) # noqa + float_frame.reindex(columns=[]).fillna(value=0) def test_fillna_downcast_dict(self): # GH#40809 diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index e07ff7e9195090..8b0251b2a99286 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -40,7 +40,7 @@ def method(self, request): @td.skip_if_no_scipy def test_rank(self, float_frame): - import scipy.stats # noqa:F401 + import scipy.stats # noqa: F401 from scipy.stats import rankdata float_frame.loc[::2, "A"] = np.nan @@ -142,7 +142,7 @@ def test_rank_mixed_frame(self, float_string_frame): @td.skip_if_no_scipy def test_rank_na_option(self, float_frame): - import scipy.stats # noqa:F401 + import scipy.stats # noqa: F401 from scipy.stats import rankdata float_frame.loc[::2, "A"] = np.nan @@ -226,7 +226,7 @@ def test_rank_axis(self): @td.skip_if_no_scipy def test_rank_methods_frame(self): - import scipy.stats # noqa:F401 + import scipy.stats # noqa: F401 from scipy.stats import rankdata xs = np.random.randint(0, 21, (100, 26)) diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py index b3d2bd795c45a1..8a484abaab54cb 100644 --- a/pandas/tests/frame/methods/test_tz_convert.py +++ b/pandas/tests/frame/methods/test_tz_convert.py @@ -91,7 +91,7 @@ def test_tz_convert_and_localize(self, fn): df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) # TODO: untested - df5 = getattr(df4, fn)("US/Pacific", level=1) # noqa + getattr(df4, fn)("US/Pacific", level=1) tm.assert_index_equal(df3.index.levels[0], l0) assert not df3.index.levels[0].equals(l0_expected) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 720106590cba37..0ddcbf87e3b4c5 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -79,8 +79,6 @@ def test_consolidate(self, float_frame): assert len(float_frame._mgr.blocks) == 1 def test_consolidate_inplace(self, float_frame): - frame = float_frame.copy() # noqa - # triggers in-place consolidation for letter in range(ord("A"), ord("Z")): float_frame[chr(letter)] = chr(letter) @@ -352,8 +350,8 @@ def test_stale_cached_series_bug_473(self, using_copy_on_write): else: Y["g"]["c"] = np.NaN repr(Y) - result = Y.sum() # noqa - exp = Y["g"].sum() # noqa + Y.sum() + Y["g"].sum() if using_copy_on_write: assert not pd.isna(Y["g"]["c"]) else: diff --git a/pandas/tests/frame/test_constructors.py 
b/pandas/tests/frame/test_constructors.py index 8624e54955d836..5c1fa5483555b2 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -262,12 +262,6 @@ def test_emptylike_constructor(self, emptylike, expected_index, expected_columns tm.assert_frame_equal(result, expected) def test_constructor_mixed(self, float_string_frame): - index, data = tm.getMixedTypeDict() - - # TODO(wesm), incomplete test? - indexed_frame = DataFrame(data, index=index) # noqa - unindexed_frame = DataFrame(data) # noqa - assert float_string_frame["foo"].dtype == np.object_ def test_constructor_cast_failure(self): diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index db68c4cd2546ba..ae199d5c373d53 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -109,7 +109,7 @@ def test_ops(self, op_str, op, rop, n): df.iloc[0] = 2 m = df.mean() - base = DataFrame( # noqa:F841 + base = DataFrame( # noqa: F841 np.tile(m.values, n).reshape(n, -1), columns=list("abcd") ) @@ -491,7 +491,7 @@ def test_query_scope(self, engine, parser): df = DataFrame(np.random.randn(20, 2), columns=list("ab")) - a, b = 1, 2 # noqa:F841 + a, b = 1, 2 # noqa: F841 res = df.query("a > b", engine=engine, parser=parser) expected = df[df.a > df.b] tm.assert_frame_equal(res, expected) @@ -641,7 +641,7 @@ def test_local_variable_with_in(self, engine, parser): def test_at_inside_string(self, engine, parser): skip_if_no_pandas_parser(parser) - c = 1 # noqa:F841 + c = 1 # noqa: F841 df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]}) result = df.query('a == "@c"', engine=engine, parser=parser) expected = df[df.a == "@c"] @@ -660,7 +660,7 @@ def test_query_undefined_local(self): def test_index_resolvers_come_after_columns_with_the_same_name( self, engine, parser ): - n = 1 # noqa:F841 + n = 1 # noqa: F841 a = np.r_[20:101:20] df = DataFrame({"index": a, "b": np.random.randn(a.size)}) @@ -805,7 +805,7 @@ def test_date_index_query_with_NaT_duplicates(self, engine, parser): def test_nested_scope(self, engine, parser): # smoke test - x = 1 # noqa:F841 + x = 1 # noqa: F841 result = pd.eval("x + 1", engine=engine, parser=parser) assert result == 2 @@ -1066,7 +1066,7 @@ def test_query_string_scalar_variable(self, parser, engine): } ) e = df[df.Symbol == "BUD US"] - symb = "BUD US" # noqa:F841 + symb = "BUD US" # noqa: F841 r = df.query("Symbol == @symb", parser=parser, engine=engine) tm.assert_frame_equal(e, r) @@ -1246,7 +1246,7 @@ def test_call_non_named_expression(self, df): def func(*_): return 1 - funcs = [func] # noqa:F841 + funcs = [func] # noqa: F841 df.eval("@func()") @@ -1303,7 +1303,7 @@ def test_query_ea_dtypes(self, dtype): pytest.importorskip("pyarrow") # GH#50261 df = DataFrame({"a": Series([1, 2], dtype=dtype)}) - ref = {2} # noqa:F841 + ref = {2} # noqa: F841 warning = RuntimeWarning if dtype == "Int64" and NUMEXPR_INSTALLED else None with tm.assert_produces_warning(warning): result = df.query("a in @ref") diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 383a008d1f32b1..1eab4225e3dd9c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2633,7 +2633,7 @@ def test_groupby_filtered_df_std(): ] df = DataFrame(dicts) - df_filter = df[df["filter_col"] == True] # noqa:E712 + df_filter = df[df["filter_col"] == True] # noqa: E712 dfgb = df_filter.groupby("groupby_col") result = dfgb.std() expected = DataFrame( diff --git 
a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py index 8e09f68c167077..7dbcaaa8d4ba6c 100644 --- a/pandas/tests/indexes/categorical/test_formats.py +++ b/pandas/tests/indexes/categorical/test_formats.py @@ -16,7 +16,7 @@ def test_format_different_scalar_lengths(self): def test_string_categorical_index_repr(self): # short idx = CategoricalIndex(["a", "bb", "ccc"]) - expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa:E501 + expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected # multiple lines @@ -24,7 +24,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa:E501 + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected @@ -33,7 +33,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa:E501 + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501 assert repr(idx) == expected @@ -41,13 +41,13 @@ def test_string_categorical_index_repr(self): idx = CategoricalIndex(list("abcdefghijklmmo")) expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'm', 'o'], - categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa:E501 + categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected # short idx = CategoricalIndex(["あ", "いい", "ううう"]) - expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected # multiple lines @@ -55,7 +55,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected @@ -64,7 +64,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', ... 
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa:E501 + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 assert repr(idx) == expected @@ -72,7 +72,7 @@ def test_string_categorical_index_repr(self): idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ'], - categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa:E501 + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected @@ -80,7 +80,7 @@ def test_string_categorical_index_repr(self): with cf.option_context("display.unicode.east_asian_width", True): # short idx = CategoricalIndex(["あ", "いい", "ううう"]) - expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected # multiple lines @@ -89,7 +89,7 @@ def test_string_categorical_index_repr(self): 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected @@ -100,7 +100,7 @@ def test_string_categorical_index_repr(self): ... 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa:E501 + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 assert repr(idx) == expected @@ -108,6 +108,6 @@ def test_string_categorical_index_repr(self): idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ'], - categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa:E501 + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 assert repr(idx) == expected diff --git a/pandas/tests/indexes/multi/test_formats.py b/pandas/tests/indexes/multi/test_formats.py index c3f6e1d88faba4..011f61fac90e83 100644 --- a/pandas/tests/indexes/multi/test_formats.py +++ b/pandas/tests/indexes/multi/test_formats.py @@ -178,7 +178,7 @@ def test_tuple_width(self, wide_multi_index): mi = wide_multi_index result = mi[:1].__repr__() expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)], - names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa:E501 + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501 assert result == expected result = mi[:10].__repr__() diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 6656face3be840..9083d725887f17 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -351,7 +351,7 @@ def test_escapechar(all_parsers): data = '''SEARCH_TERM,ACTUAL_URL "bra tv 
board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" -"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501 +"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa: E501 parser = all_parsers result = parser.read_csv( @@ -457,7 +457,7 @@ def test_read_empty_with_usecols(all_parsers, data, kwargs, expected): ], ) def test_trailing_spaces(all_parsers, kwargs, expected): - data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501 + data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa: E501 parser = all_parsers result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs) diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 2a05a3aa3297e8..030650ad0031d6 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -329,7 +329,7 @@ def test_fwf_regression(): def test_fwf_for_uint8(): data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127 -1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa:E501 +1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa: E501 df = read_fwf( StringIO(data), colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)], diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py index 447d56ac91b244..f14a3ad7c5e109 100644 --- a/pandas/tests/io/pytables/test_select.py +++ b/pandas/tests/io/pytables/test_select.py @@ -181,12 +181,12 @@ def test_select_dtypes(setup_path): _maybe_remove(store, "df") store.append("df", df, data_columns=True) - expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa:E712 + expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa: E712 for v in [True, "true", 1]: result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) tm.assert_frame_equal(expected, result) - expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa:E712 + expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa: E712 for v in [False, "false", 0]: result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) tm.assert_frame_equal(expected, result) @@ -257,7 +257,7 @@ def test_select_dtypes(setup_path): expected = df[df["A"] > 0] store.append("df", df, data_columns=True) - np_zero = np.float64(0) # noqa:F841 + np_zero = np.float64(0) # noqa: F841 result = store.select("df", where=["A>np_zero"]) tm.assert_frame_equal(expected, result) @@ -659,7 +659,7 @@ def test_frame_select_complex2(tmp_path): expected = read_hdf(hh, "df", where="l1=[2, 3, 4]") # scope with list like - l0 = selection.index.tolist() # noqa:F841 + l0 = selection.index.tolist() # noqa: F841 with HDFStore(hh) as store: result = store.select("df", where="l1=l0") tm.assert_frame_equal(result, expected) @@ -668,7 +668,7 @@ def 
test_frame_select_complex2(tmp_path): tm.assert_frame_equal(result, expected) # index - index = selection.index # noqa:F841 + index = selection.index # noqa: F841 result = read_hdf(hh, "df", where="l1=index") tm.assert_frame_equal(result, expected) @@ -894,7 +894,7 @@ def test_query_compare_column_type(setup_path): with ensure_clean_store(setup_path) as store: store.append("test", df, format="table", data_columns=True) - ts = Timestamp("2014-01-01") # noqa:F841 + ts = Timestamp("2014-01-01") # noqa: F841 result = store.select("test", where="real_date > ts") expected = df.loc[[1], :] tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 4bd4e0cd7146f6..bb62d1a194a3e4 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -201,7 +201,7 @@ def test_dtype_backend(self, string_storage, dtype_backend): check_before_test=True, ) def test_banklist_url(self): - url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501 + url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa: E501 df1 = self.read_html( # lxml cannot find attrs leave out for now url, diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index c47a5a55dfcf63..9bbc8f42b67046 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -439,20 +439,20 @@ def test_grouped_box_layout(self, hist_df): # _check_plot_works adds an ax so catch warning. see GH #13188 with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - box = _check_plot_works( + _check_plot_works( df.groupby("gender").boxplot, column="height", return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2)) with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - box = _check_plot_works( + _check_plot_works( df.groupby("category").boxplot, column="height", return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2)) # GH 6769 with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - box = _check_plot_works( + _check_plot_works( df.groupby("classroom").boxplot, column="height", return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) @@ -469,13 +469,13 @@ def test_grouped_box_layout(self, hist_df): self._check_visible(ax.get_xticklabels()) self._check_visible([ax.xaxis.get_label()]) - box = df.groupby("classroom").boxplot( + df.groupby("classroom").boxplot( column=["height", "weight", "category"], return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - box = _check_plot_works( + _check_plot_works( df.groupby("category").boxplot, column="height", layout=(3, 2), @@ -483,7 +483,7 @@ def test_grouped_box_layout(self, hist_df): ) self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2)) with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - box = _check_plot_works( + _check_plot_works( df.groupby("category").boxplot, column="height", layout=(3, -1), @@ -491,22 +491,18 @@ def test_grouped_box_layout(self, hist_df): ) self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2)) - box = df.boxplot( - column=["height", "weight", "category"], by="gender", layout=(4, 1) - ) + df.boxplot(column=["height", "weight", "category"], 
by="gender", layout=(4, 1)) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1)) - box = df.boxplot( - column=["height", "weight", "category"], by="gender", layout=(-1, 1) - ) + df.boxplot(column=["height", "weight", "category"], by="gender", layout=(-1, 1)) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1)) - box = df.groupby("classroom").boxplot( + df.groupby("classroom").boxplot( column=["height", "weight", "category"], layout=(1, 4), return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4)) - box = df.groupby("classroom").boxplot( # noqa + df.groupby("classroom").boxplot( column=["height", "weight", "category"], layout=(1, -1), return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3)) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 04228bde1c6b95..3392c309e03291 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -123,7 +123,7 @@ def test_hist_no_overlap(self): def test_hist_by_no_extra_plots(self, hist_df): df = hist_df - axes = df.height.hist(by=df.gender) # noqa + df.height.hist(by=df.gender) assert len(self.plt.get_fignums()) == 1 def test_plot_fails_when_ax_differs_from_figure(self, ts): diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 28e99bd3c0cc05..9514ccd24c1ca3 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -21,7 +21,7 @@ # a fixture value can be overridden by the test parameter value. Note that the # value of the fixture can be overridden this way even if the test doesn't use # it directly (doesn't mention it in the function prototype). 
-# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization # noqa:E501 +# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization # noqa: E501 # in this module we override the fixture values defined in conftest.py # tuples of '_index_factory,_series_name,_index_start,_index_end' DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10)) diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py index 216a055120a46f..c351fb23fca0a4 100644 --- a/pandas/tests/scalar/timestamp/test_rendering.py +++ b/pandas/tests/scalar/timestamp/test_rendering.py @@ -1,7 +1,7 @@ import pprint import pytest -import pytz # noqa # a test below uses pytz but only inside a `eval` call +import pytz # noqa: F401 # a test below uses pytz but only inside a `eval` call from pandas import Timestamp diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index be53209d889eee..3aa81c5e99ffed 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -887,20 +887,20 @@ def test_none_comparison(request, series_with_simple_index): series.iloc[0] = np.nan # noinspection PyComparisonWithNone - result = series == None # noqa:E711 + result = series == None # noqa: E711 assert not result.iat[0] assert not result.iat[1] # noinspection PyComparisonWithNone - result = series != None # noqa:E711 + result = series != None # noqa: E711 assert result.iat[0] assert result.iat[1] - result = None == series # noqa:E711 + result = None == series # noqa: E711 assert not result.iat[0] assert not result.iat[1] - result = None != series # noqa:E711 + result = None != series # noqa: E711 assert result.iat[0] assert result.iat[1] diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index cccc4953bc3c7a..c42b9f056878d9 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -376,7 +376,7 @@ def test_categorical_series_repr_datetime(self): 4 2011-01-01 13:00:00 dtype: category Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, - 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa:E501 + 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa: E501 assert repr(s) == exp @@ -390,7 +390,7 @@ def test_categorical_series_repr_datetime(self): dtype: category Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, - 2011-01-01 13:00:00-05:00]""" # noqa:E501 + 2011-01-01 13:00:00-05:00]""" # noqa: E501 assert repr(s) == exp @@ -404,7 +404,7 @@ def test_categorical_series_repr_datetime_ordered(self): 4 2011-01-01 13:00:00 dtype: category Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < - 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501 + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 assert repr(s) == exp @@ -418,7 +418,7 @@ def test_categorical_series_repr_datetime_ordered(self): dtype: category Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < - 2011-01-01 13:00:00-05:00]""" # noqa:E501 + 2011-01-01 13:00:00-05:00]""" # noqa: E501 assert repr(s) == exp @@ -432,7 +432,7 @@ def test_categorical_series_repr_period(self): 4 2011-01-01 13:00 dtype: 
category Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(s) == exp @@ -458,7 +458,7 @@ def test_categorical_series_repr_period_ordered(self): 4 2011-01-01 13:00 dtype: category Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < - 2011-01-01 13:00]""" # noqa:E501 + 2011-01-01 13:00]""" # noqa: E501 assert repr(s) == exp @@ -502,7 +502,7 @@ def test_categorical_series_repr_timedelta(self): dtype: category Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, - 8 days 01:00:00, 9 days 01:00:00]""" # noqa:E501 + 8 days 01:00:00, 9 days 01:00:00]""" # noqa: E501 assert repr(s) == exp @@ -534,6 +534,6 @@ def test_categorical_series_repr_timedelta_ordered(self): dtype: category Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 < - 8 days 01:00:00 < 9 days 01:00:00]""" # noqa:E501 + 8 days 01:00:00 < 9 days 01:00:00]""" # noqa: E501 assert repr(s) == exp diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index a97676578c079e..0f5fdbefd13d2d 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -40,8 +40,8 @@ def test_dask(df): olduse = pd.get_option("compute.use_numexpr") try: - toolz = import_module("toolz") # noqa:F841 - dask = import_module("dask") # noqa:F841 + toolz = import_module("toolz") # noqa: F841 + dask = import_module("dask") # noqa: F841 import dask.dataframe as dd @@ -58,7 +58,7 @@ def test_dask_ufunc(): olduse = pd.get_option("compute.use_numexpr") try: - dask = import_module("dask") # noqa:F841 + dask = import_module("dask") # noqa: F841 import dask.array as da import dask.dataframe as dd @@ -100,7 +100,7 @@ def test_construct_dask_float_array_int_dtype_match_ndarray(): def test_xarray(df): - xarray = import_module("xarray") # noqa:F841 + xarray = import_module("xarray") # noqa: F841 assert df.to_xarray() is not None @@ -142,7 +142,7 @@ def test_oo_optimized_datetime_index_unpickle(): @pytest.mark.network @tm.network def test_statsmodels(): - statsmodels = import_module("statsmodels") # noqa:F841 + statsmodels = import_module("statsmodels") # noqa: F841 import statsmodels.api as sm import statsmodels.formula.api as smf @@ -151,7 +151,7 @@ def test_statsmodels(): def test_scikit_learn(): - sklearn = import_module("sklearn") # noqa:F841 + sklearn = import_module("sklearn") # noqa: F841 from sklearn import ( datasets, svm, @@ -174,7 +174,7 @@ def test_seaborn(): def test_pandas_gbq(): # Older versions import from non-public, non-existent pandas funcs pytest.importorskip("pandas_gbq", minversion="0.10.0") - pandas_gbq = import_module("pandas_gbq") # noqa:F841 + pandas_gbq = import_module("pandas_gbq") # noqa: F841 @pytest.mark.network @@ -253,7 +253,7 @@ def test_frame_setitem_dask_array_into_new_col(): olduse = pd.get_option("compute.use_numexpr") try: - dask = import_module("dask") # noqa:F841 + dask = import_module("dask") # noqa: F841 import dask.array as da diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py index aa31c024fe3388..89ac6e2963e986 100644 --- a/pandas/util/__init__.py +++ b/pandas/util/__init__.py @@ -1,11 +1,11 @@ # pyright: reportUnusedImport = false -from pandas.util._decorators import ( # noqa:F401 +from pandas.util._decorators 
import ( # noqa: F401 Appender, Substitution, cache_readonly, ) -from pandas.core.util.hashing import ( # noqa:F401 +from pandas.core.util.hashing import ( # noqa: F401 hash_array, hash_pandas_object, ) diff --git a/pyproject.toml b/pyproject.toml index 6eef8e4fa9b7cd..7caf1f2a54f268 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -226,6 +226,8 @@ select = [ "TCH", # comprehensions "C4", + # pygrep-hooks + "PGH" ] ignore = [ @@ -277,6 +279,8 @@ ignore = [ "PLW0603", # Docstrings should not be included in stubs "PYI021", + # No builtin `eval()` allowed + "PGH001", # compare-to-empty-string "PLC1901", # Use typing_extensions.TypeAlias for type aliases
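For context on the new PGH001 ignore: it was presumably added because the test suite itself calls the builtin eval() — see the test_rendering.py hunk above, where the pytz import carries # noqa: F401 because pytz is only referenced inside an eval()'d string. Below is a minimal, hypothetical sketch of that "import only used inside eval()" pattern, with the stdlib math module standing in purely for illustration (it is not taken from the diff):

import math  # noqa: F401  # statically unused: only referenced inside the eval() string below

expr = "math.sqrt(4) + 1"
result = eval(expr)  # ruff's PGH001 ("no builtin eval()") flags this call; pandas ignores the rule in pyproject.toml
assert result == 3.0

Without the noqa comment, F401 would report the import as unused, and without the PGH001 entry in the ignore list, ruff would reject the eval() call itself.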