diff --git a/ci/code_checks.sh b/ci/code_checks.sh index cabc25b5e0ba5d..26c8ae1298630e 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -80,12 +80,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.CategoricalIndex.codes SA01" \ -i "pandas.CategoricalIndex.ordered SA01" \ -i "pandas.DataFrame.__dataframe__ SA01" \ - -i "pandas.DataFrame.__iter__ SA01" \ -i "pandas.DataFrame.at_time PR01" \ - -i "pandas.DataFrame.columns SA01" \ - -i "pandas.DataFrame.droplevel SA01" \ - -i "pandas.DataFrame.hist RT03" \ - -i "pandas.DataFrame.infer_objects RT03" \ -i "pandas.DataFrame.kurt RT03,SA01" \ -i "pandas.DataFrame.kurtosis RT03,SA01" \ -i "pandas.DataFrame.max RT03" \ @@ -93,93 +88,44 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.DataFrame.median RT03,SA01" \ -i "pandas.DataFrame.min RT03" \ -i "pandas.DataFrame.plot PR02,SA01" \ - -i "pandas.DataFrame.pop SA01" \ -i "pandas.DataFrame.prod RT03" \ -i "pandas.DataFrame.product RT03" \ - -i "pandas.DataFrame.reorder_levels SA01" \ -i "pandas.DataFrame.sem PR01,RT03,SA01" \ -i "pandas.DataFrame.skew RT03,SA01" \ -i "pandas.DataFrame.sparse PR01" \ -i "pandas.DataFrame.std PR01,RT03,SA01" \ -i "pandas.DataFrame.sum RT03" \ -i "pandas.DataFrame.swaplevel SA01" \ - -i "pandas.DataFrame.to_feather SA01" \ -i "pandas.DataFrame.to_markdown SA01" \ - -i "pandas.DataFrame.to_parquet RT03" \ -i "pandas.DataFrame.var PR01,RT03,SA01" \ - -i "pandas.DatetimeIndex.ceil SA01" \ - -i "pandas.DatetimeIndex.date SA01" \ - -i "pandas.DatetimeIndex.day_of_year SA01" \ - -i "pandas.DatetimeIndex.dayofyear SA01" \ - -i "pandas.DatetimeIndex.floor SA01" \ - -i "pandas.DatetimeIndex.freqstr SA01" \ -i "pandas.DatetimeIndex.indexer_at_time PR01,RT03" \ - -i "pandas.DatetimeIndex.indexer_between_time RT03" \ - -i "pandas.DatetimeIndex.inferred_freq SA01" \ - -i "pandas.DatetimeIndex.is_leap_year SA01" \ - -i "pandas.DatetimeIndex.quarter SA01" \ - -i "pandas.DatetimeIndex.round SA01" \ - -i "pandas.DatetimeIndex.snap PR01,RT03,SA01" \ - -i "pandas.DatetimeIndex.std PR01,RT03" \ - -i "pandas.DatetimeIndex.time SA01" \ - -i "pandas.DatetimeIndex.timetz SA01" \ + -i "pandas.DatetimeIndex.snap PR01,RT03" \ -i "pandas.DatetimeIndex.to_period RT03" \ -i "pandas.DatetimeIndex.to_pydatetime RT03,SA01" \ - -i "pandas.DatetimeIndex.tz SA01" \ - -i "pandas.DatetimeIndex.tz_convert RT03" \ - -i "pandas.DatetimeTZDtype SA01" \ - -i "pandas.DatetimeTZDtype.tz SA01" \ - -i "pandas.DatetimeTZDtype.unit SA01" \ -i "pandas.Grouper PR02" \ - -i "pandas.HDFStore.info RT03,SA01" \ - -i "pandas.HDFStore.keys SA01" \ - -i "pandas.HDFStore.put PR01,SA01" \ - -i "pandas.HDFStore.select SA01" \ - -i "pandas.HDFStore.walk SA01" \ -i "pandas.Index PR07" \ - -i "pandas.Index.T SA01" \ -i "pandas.Index.append PR07,RT03,SA01" \ - -i "pandas.Index.astype SA01" \ - -i "pandas.Index.copy PR07,SA01" \ -i "pandas.Index.difference PR07,RT03,SA01" \ -i "pandas.Index.drop PR07,SA01" \ - -i "pandas.Index.drop_duplicates RT03" \ - -i "pandas.Index.droplevel RT03,SA01" \ - -i "pandas.Index.dropna RT03,SA01" \ - -i "pandas.Index.dtype SA01" \ -i "pandas.Index.duplicated RT03" \ - -i "pandas.Index.empty GL08" \ - -i "pandas.Index.equals SA01" \ - -i "pandas.Index.fillna RT03" \ -i "pandas.Index.get_indexer PR07,SA01" \ -i "pandas.Index.get_indexer_for PR01,SA01" \ -i "pandas.Index.get_indexer_non_unique PR07,SA01" \ -i "pandas.Index.get_loc PR07,RT03,SA01" \ - -i "pandas.Index.get_slice_bound PR07" \ - -i "pandas.Index.hasnans SA01" \ -i "pandas.Index.identical 
PR01,SA01" \ - -i "pandas.Index.inferred_type SA01" \ -i "pandas.Index.insert PR07,RT03,SA01" \ -i "pandas.Index.intersection PR07,RT03,SA01" \ - -i "pandas.Index.item SA01" \ -i "pandas.Index.join PR07,RT03,SA01" \ - -i "pandas.Index.map SA01" \ - -i "pandas.Index.memory_usage RT03" \ - -i "pandas.Index.name SA01" \ -i "pandas.Index.names GL08" \ - -i "pandas.Index.nbytes SA01" \ -i "pandas.Index.nunique RT03" \ -i "pandas.Index.putmask PR01,RT03" \ -i "pandas.Index.ravel PR01,RT03" \ -i "pandas.Index.reindex PR07" \ -i "pandas.Index.slice_indexer PR07,RT03,SA01" \ - -i "pandas.Index.slice_locs RT03" \ -i "pandas.Index.str PR01,SA01" \ -i "pandas.Index.symmetric_difference PR07,RT03,SA01" \ -i "pandas.Index.take PR01,PR07" \ - -i "pandas.Index.to_list RT03" \ -i "pandas.Index.union PR07,RT03,SA01" \ - -i "pandas.Index.unique RT03" \ -i "pandas.Index.view GL08" \ -i "pandas.Int16Dtype SA01" \ -i "pandas.Int32Dtype SA01" \ @@ -207,7 +153,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.MultiIndex.append PR07,SA01" \ -i "pandas.MultiIndex.copy PR07,RT03,SA01" \ -i "pandas.MultiIndex.drop PR07,RT03,SA01" \ - -i "pandas.MultiIndex.droplevel RT03,SA01" \ -i "pandas.MultiIndex.dtypes SA01" \ -i "pandas.MultiIndex.get_indexer PR07,SA01" \ -i "pandas.MultiIndex.get_level_values SA01" \ @@ -248,7 +193,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.PeriodIndex.dayofyear SA01" \ -i "pandas.PeriodIndex.days_in_month SA01" \ -i "pandas.PeriodIndex.daysinmonth SA01" \ - -i "pandas.PeriodIndex.freqstr SA01" \ -i "pandas.PeriodIndex.from_fields PR07,SA01" \ -i "pandas.PeriodIndex.from_ordinals SA01" \ -i "pandas.PeriodIndex.hour SA01" \ @@ -269,7 +213,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.RangeIndex.step SA01" \ -i "pandas.RangeIndex.stop SA01" \ -i "pandas.Series SA01" \ - -i "pandas.Series.T SA01" \ -i "pandas.Series.__iter__ RT03,SA01" \ -i "pandas.Series.add PR07" \ -i "pandas.Series.at_time PR01" \ @@ -287,49 +230,37 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.Series.cat.reorder_categories PR01,PR02" \ -i "pandas.Series.cat.set_categories PR01,PR02" \ -i "pandas.Series.div PR07" \ - -i "pandas.Series.droplevel SA01" \ -i "pandas.Series.dt.as_unit PR01,PR02" \ - -i "pandas.Series.dt.ceil PR01,PR02,SA01" \ + -i "pandas.Series.dt.ceil PR01,PR02" \ -i "pandas.Series.dt.components SA01" \ - -i "pandas.Series.dt.date SA01" \ -i "pandas.Series.dt.day_name PR01,PR02" \ - -i "pandas.Series.dt.day_of_year SA01" \ - -i "pandas.Series.dt.dayofyear SA01" \ -i "pandas.Series.dt.days SA01" \ -i "pandas.Series.dt.days_in_month SA01" \ -i "pandas.Series.dt.daysinmonth SA01" \ - -i "pandas.Series.dt.floor PR01,PR02,SA01" \ + -i "pandas.Series.dt.floor PR01,PR02" \ -i "pandas.Series.dt.freq GL08" \ - -i "pandas.Series.dt.is_leap_year SA01" \ -i "pandas.Series.dt.microseconds SA01" \ -i "pandas.Series.dt.month_name PR01,PR02" \ -i "pandas.Series.dt.nanoseconds SA01" \ -i "pandas.Series.dt.normalize PR01" \ - -i "pandas.Series.dt.quarter SA01" \ -i "pandas.Series.dt.qyear GL08" \ - -i "pandas.Series.dt.round PR01,PR02,SA01" \ + -i "pandas.Series.dt.round PR01,PR02" \ -i "pandas.Series.dt.seconds SA01" \ -i "pandas.Series.dt.strftime PR01,PR02" \ - -i "pandas.Series.dt.time SA01" \ - -i "pandas.Series.dt.timetz SA01" \ -i "pandas.Series.dt.to_period PR01,PR02,RT03" \ -i "pandas.Series.dt.total_seconds PR01" \ - -i "pandas.Series.dt.tz SA01" \ - -i "pandas.Series.dt.tz_convert PR01,PR02,RT03" \ + -i 
"pandas.Series.dt.tz_convert PR01,PR02" \ -i "pandas.Series.dt.tz_localize PR01,PR02" \ -i "pandas.Series.dt.unit GL08" \ -i "pandas.Series.dtype SA01" \ - -i "pandas.Series.empty GL08" \ -i "pandas.Series.eq PR07,SA01" \ -i "pandas.Series.floordiv PR07" \ -i "pandas.Series.ge PR07,SA01" \ -i "pandas.Series.gt PR07,SA01" \ -i "pandas.Series.hasnans SA01" \ - -i "pandas.Series.infer_objects RT03" \ -i "pandas.Series.is_monotonic_decreasing SA01" \ -i "pandas.Series.is_monotonic_increasing SA01" \ -i "pandas.Series.is_unique SA01" \ - -i "pandas.Series.item SA01" \ -i "pandas.Series.kurt RT03,SA01" \ -i "pandas.Series.kurtosis RT03,SA01" \ -i "pandas.Series.le PR07,SA01" \ @@ -344,7 +275,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.Series.mod PR07" \ -i "pandas.Series.mode SA01" \ -i "pandas.Series.mul PR07" \ - -i "pandas.Series.nbytes SA01" \ -i "pandas.Series.ne PR07,SA01" \ -i "pandas.Series.nunique RT03" \ -i "pandas.Series.pad PR01,SA01" \ @@ -409,7 +339,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.Series.swaplevel SA01" \ -i "pandas.Series.to_dict SA01" \ -i "pandas.Series.to_frame SA01" \ - -i "pandas.Series.to_list RT03" \ -i "pandas.Series.to_markdown SA01" \ -i "pandas.Series.to_string SA01" \ -i "pandas.Series.truediv PR07" \ @@ -432,14 +361,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.Timedelta.total_seconds SA01" \ -i "pandas.Timedelta.view SA01" \ -i "pandas.TimedeltaIndex.as_unit RT03,SA01" \ - -i "pandas.TimedeltaIndex.ceil SA01" \ -i "pandas.TimedeltaIndex.components SA01" \ -i "pandas.TimedeltaIndex.days SA01" \ - -i "pandas.TimedeltaIndex.floor SA01" \ - -i "pandas.TimedeltaIndex.inferred_freq SA01" \ -i "pandas.TimedeltaIndex.microseconds SA01" \ -i "pandas.TimedeltaIndex.nanoseconds SA01" \ - -i "pandas.TimedeltaIndex.round SA01" \ -i "pandas.TimedeltaIndex.seconds SA01" \ -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \ -i "pandas.Timestamp PR07,SA01" \ diff --git a/doc/source/development/community.rst b/doc/source/development/community.rst index ccf7be8e477482..ab8294b8f135a6 100644 --- a/doc/source/development/community.rst +++ b/doc/source/development/community.rst @@ -100,6 +100,8 @@ The pandas mailing list `pandas-dev@python.org `_. + .. _community.slack: Community slack diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 39e279fd5c917e..28129440b86d7d 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -557,11 +557,12 @@ is being raised, using ``pytest.raises`` instead. Testing a warning ^^^^^^^^^^^^^^^^^ -Use ``tm.assert_produces_warning`` as a context manager to check that a block of code raises a warning. +Use ``tm.assert_produces_warning`` as a context manager to check that a block of code raises a warning +and specify the warning message using the ``match`` argument. .. code-block:: python - with tm.assert_produces_warning(DeprecationWarning): + with tm.assert_produces_warning(DeprecationWarning, match="the warning message"): pd.deprecated_function() If a warning should specifically not happen in a block of code, pass ``False`` into the context manager. 
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index f831723f449310..43da43a983429a 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1908,7 +1908,7 @@ "- Provide an API that is pleasing to use interactively and is \"good enough\" for many tasks\n", "- Provide the foundations for dedicated libraries to build on\n", "\n", - "If you build a great library on top of this, let us know and we'll [link](https://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it.\n", + "If you build a great library on top of this, let us know and we'll [link](https://pandas.pydata.org/community/ecosystem.html) to it.\n", "\n", "### Subclassing\n", "\n", diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 5f5ceb8693c443..106c2579cd31a2 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -41,6 +41,7 @@ Other enhancements - :class:`.errors.DtypeWarning` improved to include column names when mixed data types are detected (:issue:`58174`) - :meth:`DataFrame.cummin`, :meth:`DataFrame.cummax`, :meth:`DataFrame.cumprod` and :meth:`DataFrame.cumsum` methods now have a ``numeric_only`` parameter (:issue:`53072`) - :meth:`DataFrame.fillna` and :meth:`Series.fillna` can now accept ``value=None``; for non-object dtype the corresponding NA value will be used (:issue:`57723`) +- :meth:`Series.cummin` and :meth:`Series.cummax` now support :class:`CategoricalDtype` (:issue:`52335`) .. --------------------------------------------------------------------------- .. _whatsnew_300.notable_bug_fixes: @@ -158,6 +159,7 @@ Other API changes - Updated :meth:`DataFrame.to_excel` so that the output spreadsheet has no styling. Custom styling can still be done using :meth:`Styler.to_excel` (:issue:`54154`) - pickle and HDF (``.h5``) files created with Python 2 are no longer explicitly supported (:issue:`57387`) - pickled objects from pandas version less than ``1.0.0`` are no longer supported (:issue:`57155`) +- when comparing the indexes in :func:`testing.assert_series_equal`, ``check_exact`` defaults to ``True`` if an :class:`Index` is of an integer dtype. (:issue:`57386`) .. --------------------------------------------------------------------------- .. _whatsnew_300.deprecations: @@ -199,6 +201,7 @@ Other Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.all`, :meth:`DataFrame.min`, :meth:`DataFrame.max`, :meth:`DataFrame.sum`, :meth:`DataFrame.prod`, :meth:`DataFrame.mean`, :meth:`DataFrame.median`, :meth:`DataFrame.sem`, :meth:`DataFrame.var`, :meth:`DataFrame.std`, :meth:`DataFrame.skew`, :meth:`DataFrame.kurt`, :meth:`Series.all`, :meth:`Series.min`, :meth:`Series.max`, :meth:`Series.sum`, :meth:`Series.prod`, :meth:`Series.mean`, :meth:`Series.median`, :meth:`Series.sem`, :meth:`Series.var`, :meth:`Series.std`, :meth:`Series.skew`, and :meth:`Series.kurt`. (:issue:`57087`) - Deprecated allowing non-keyword arguments in :meth:`Series.to_markdown` except ``buf``. (:issue:`57280`) - Deprecated allowing non-keyword arguments in :meth:`Series.to_string` except ``buf``. (:issue:`57280`) +- Deprecated behavior of :meth:`Series.dt.to_pytimedelta`; in a future version this will return a :class:`Series` containing python ``datetime.timedelta`` objects instead of an ``ndarray`` of timedelta; this matches the behavior of other :meth:`Series.dt` properties.
(:issue:`57463`) - Deprecated using ``epoch`` date format in :meth:`DataFrame.to_json` and :meth:`Series.to_json`, use ``iso`` instead. (:issue:`57063`) - @@ -211,6 +214,7 @@ Removal of prior version deprecations/changes - :func:`concat` no longer ignores empty objects when determining output dtypes (:issue:`39122`) - :func:`concat` with all-NA entries no longer ignores the dtype of those entries when determining the result dtype (:issue:`40893`) - :func:`read_excel`, :func:`read_json`, :func:`read_html`, and :func:`read_xml` no longer accept raw string or byte representation of the data. That type of data must be wrapped in a :py:class:`StringIO` or :py:class:`BytesIO` (:issue:`53767`) +- :func:`to_datetime` with a ``unit`` specified no longer parses strings into floats; instead it parses them the same way as without ``unit`` (:issue:`50735`) - :meth:`DataFrame.groupby` with ``as_index=False`` and aggregation methods will no longer exclude from the result the groupings that do not arise from the input (:issue:`49519`) - :meth:`Series.dt.to_pydatetime` now returns a :class:`Series` of :py:class:`datetime.datetime` objects (:issue:`52459`) - :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`) @@ -220,6 +224,7 @@ Removal of prior version deprecations/changes - Disallow automatic casting to object in :class:`Series` logical operations (``&``, ``^``, ``||``) between series with mismatched indexes and dtypes other than ``object`` or ``bool`` (:issue:`52538`) - Disallow calling :meth:`Series.replace` or :meth:`DataFrame.replace` without a ``value`` and with non-dict-like ``to_replace`` (:issue:`33302`) - Disallow constructing a :class:`arrays.SparseArray` with scalar data (:issue:`53039`) +- Disallow indexing an :class:`Index` with a boolean indexer of length zero; it now raises ``ValueError`` (:issue:`55820`) - Disallow non-standard (``np.ndarray``, :class:`Index`, :class:`ExtensionArray`, or :class:`Series`) to :func:`isin`, :func:`unique`, :func:`factorize` (:issue:`52986`) - Disallow passing a pandas type to :meth:`Index.view` (:issue:`55709`) - Disallow units other than "s", "ms", "us", "ns" for datetime64 and timedelta64 dtypes in :func:`array` (:issue:`53817`) @@ -329,6 +334,7 @@ Performance improvements - Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`) - Performance improvement in :meth:`Index.to_frame` returning a :class:`RangeIndex` columns of a :class:`Index` when possible. (:issue:`58018`) - Performance improvement in :meth:`MultiIndex.equals` for equal length indexes (:issue:`56990`) +- Performance improvement in :meth:`MultiIndex.memory_usage` to ignore the index engine when it isn't already cached. (:issue:`58385`) - Performance improvement in :meth:`RangeIndex.__getitem__` with a boolean mask or integers returning a :class:`RangeIndex` instead of a :class:`Index` when possible.
(:issue:`57588`) - Performance improvement in :meth:`RangeIndex.append` when appending the same index (:issue:`57252`) - Performance improvement in :meth:`RangeIndex.argmin` and :meth:`RangeIndex.argmax` (:issue:`57823`) @@ -360,6 +366,7 @@ Datetimelike - Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`) - Bug in :func:`date_range` where using a negative frequency value would not include all points between the start and end values (:issue:`56382`) - Bug in :func:`tseries.api.guess_datetime_format` would fail to infer time format when "%Y" == "%H%M" (:issue:`57452`) +- Bug in setting scalar values with mismatched resolution into arrays with non-nanosecond ``datetime64``, ``timedelta64`` or :class:`DatetimeTZDtype` incorrectly truncating those scalars (:issue:`56410`) Timedelta ^^^^^^^^^ @@ -410,6 +417,7 @@ MultiIndex I/O ^^^ - Bug in :class:`DataFrame` and :class:`Series` ``repr`` of :py:class:`collections.abc.Mapping`` elements. (:issue:`57915`) +- Bug in :meth:`DataFrame.to_dict` raising an unnecessary ``UserWarning`` when columns are not unique and ``orient='tight'``. (:issue:`58281`) - Bug in :meth:`DataFrame.to_excel` when writing empty :class:`DataFrame` with :class:`MultiIndex` on both axes (:issue:`57696`) - Bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`) - Bug in :meth:`read_csv` raising ``TypeError`` when ``index_col`` is specified and ``na_values`` is a dict containing the key ``None``. (:issue:`57547`) @@ -430,6 +438,7 @@ Groupby/resample/rolling - Bug in :meth:`.DataFrameGroupBy.groups` and :meth:`.SeriesGroupby.groups` that would not respect groupby argument ``dropna`` (:issue:`55919`) - Bug in :meth:`.DataFrameGroupBy.median` where nat values gave an incorrect result. (:issue:`57926`) - Bug in :meth:`.DataFrameGroupBy.quantile` when ``interpolation="nearest"`` is inconsistent with :meth:`DataFrame.quantile` (:issue:`47942`) +- Bug in :meth:`.Resampler.interpolate` where a :class:`DataFrame` with non-uniform sampling and/or indices not aligning with the resulting resampled index would result in wrong interpolation (:issue:`21351`) - Bug in :meth:`DataFrame.ewm` and :meth:`Series.ewm` when passed ``times`` and aggregation functions other than mean (:issue:`51695`) - Bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`) - Bug in :meth:`DataFrameGroupBy.apply` with ``as_index=False`` that was returning :class:`MultiIndex` instead of returning :class:`Index`. (:issue:`58291`) @@ -459,6 +468,7 @@ Other - Bug in :class:`DataFrame` when passing a ``dict`` with a NA scalar and ``columns`` that would always return ``np.nan`` (:issue:`57205`) - Bug in :func:`unique` on :class:`Index` not always returning :class:`Index` (:issue:`57043`) - Bug in :meth:`DataFrame.eval` and :meth:`DataFrame.query` which caused an exception when using NumPy attributes via ``@`` notation, e.g., ``df.eval("@np.floor(a)")``. (:issue:`58041`) +- Bug in :meth:`DataFrame.eval` and :meth:`DataFrame.query` which did not allow the ``tan`` function to be used.
(:issue:`55091`) - Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` and ``ascending=False`` not returning a :class:`RangeIndex` columns (:issue:`57293`) - Bug in :meth:`DataFrame.transform` that was returning the wrong order unless the index was monotonically increasing. (:issue:`57069`) - Bug in :meth:`DataFrame.where` where using a non-bool type array in the function would return a ``ValueError`` instead of a ``TypeError`` (:issue:`56330`) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 7aa1cb715521eb..5b6d83ba8e9ee5 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -477,7 +477,7 @@ def has_infs(const floating[:] arr) -> bool: @cython.boundscheck(False) @cython.wraparound(False) -def has_only_ints_or_nan(floating[:] arr) -> bool: +def has_only_ints_or_nan(const floating[:] arr) -> bool: cdef: floating val intp_t i @@ -631,7 +631,7 @@ ctypedef fused int6432_t: @cython.wraparound(False) @cython.boundscheck(False) -def is_range_indexer(ndarray[int6432_t, ndim=1] left, Py_ssize_t n) -> bool: +def is_range_indexer(const int6432_t[:] left, Py_ssize_t n) -> bool: """ Perform an element by element comparison on 1-d integer arrays, meant for indexer comparisons @@ -652,7 +652,7 @@ def is_range_indexer(ndarray[int6432_t, ndim=1] left, Py_ssize_t n) -> bool: @cython.wraparound(False) @cython.boundscheck(False) -def is_sequence_range(ndarray[int6432_t, ndim=1] sequence, int64_t step) -> bool: +def is_sequence_range(const int6432_t[:] sequence, int64_t step) -> bool: """ Check if sequence is equivalent to a range with the specified step. """ @@ -2628,7 +2628,11 @@ def maybe_convert_objects(ndarray[object] objects, seen.object_ = True break elif val is C_NA: - seen.object_ = True + if convert_to_nullable_dtype: + seen.null_ = True + mask[i] = True + else: + seen.object_ = True continue else: seen.object_ = True @@ -2691,6 +2695,12 @@ def maybe_convert_objects(ndarray[object] objects, dtype = StringDtype(storage="pyarrow_numpy") return dtype.construct_array_type()._from_sequence(objects, dtype=dtype) + elif convert_to_nullable_dtype and is_string_array(objects, skipna=True): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype() + return dtype.construct_array_type()._from_sequence(objects, dtype=dtype) + seen.object_ = True elif seen.interval_: if is_interval_array(objects): @@ -2734,12 +2744,12 @@ def maybe_convert_objects(ndarray[object] objects, return objects if seen.bool_: - if seen.is_bool: - # is_bool property rules out everything else - return bools.view(np.bool_) - elif convert_to_nullable_dtype and seen.is_bool_or_na: + if convert_to_nullable_dtype and seen.is_bool_or_na: from pandas.core.arrays import BooleanArray return BooleanArray(bools.view(np.bool_), mask) + elif seen.is_bool: + # is_bool property rules out everything else + return bools.view(np.bool_) seen.object_ = True if not seen.object_: @@ -2752,11 +2762,11 @@ def maybe_convert_objects(ndarray[object] objects, result = floats elif seen.int_ or seen.uint_: if convert_to_nullable_dtype: - from pandas.core.arrays import IntegerArray + # Below we will wrap in IntegerArray if seen.uint_: - result = IntegerArray(uints, mask) + result = uints else: - result = IntegerArray(ints, mask) + result = ints else: result = floats elif seen.nan_: @@ -2771,7 +2781,6 @@ def maybe_convert_objects(ndarray[object] objects, result = uints else: result = ints - else: # don't cast int to float, etc. 
if seen.null_: @@ -2794,6 +2803,22 @@ def maybe_convert_objects(ndarray[object] objects, else: result = ints + # TODO: do these after the itemsize check? + if (result is ints or result is uints) and convert_to_nullable_dtype: + from pandas.core.arrays import IntegerArray + + # Set these values to 1 to be deterministic, match + # IntegerArray._internal_fill_value + result[mask] = 1 + result = IntegerArray(result, mask) + elif result is floats and convert_to_nullable_dtype: + from pandas.core.arrays import FloatingArray + + # Set these values to 1.0 to be deterministic, match + # FloatingArray._internal_fill_value + result[mask] = 1.0 + result = FloatingArray(result, mask) + if result is uints or result is ints or result is floats or result is complexes: # cast to the largest itemsize when all values are NumPy scalars if itemsize_max > 0 and itemsize_max != result.dtype.itemsize: diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 21d1405328da66..28ea06739e0c8d 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -19,7 +19,7 @@ from pandas._libs.lib cimport c_is_list_like @cython.wraparound(False) @cython.boundscheck(False) -def unstack(numeric_object_t[:, :] values, const uint8_t[:] mask, +def unstack(const numeric_object_t[:, :] values, const uint8_t[:] mask, Py_ssize_t stride, Py_ssize_t length, Py_ssize_t width, numeric_object_t[:, :] new_values, uint8_t[:, :] new_mask) -> None: """ @@ -80,7 +80,7 @@ def unstack(numeric_object_t[:, :] values, const uint8_t[:] mask, @cython.wraparound(False) @cython.boundscheck(False) -def explode(ndarray[object] values): +def explode(object[:] values): """ transform array list-likes to long form preserve non-list entries diff --git a/pandas/_libs/tslib.pyi b/pandas/_libs/tslib.pyi index 5a340c1d88bc43..7e3372a80db9db 100644 --- a/pandas/_libs/tslib.pyi +++ b/pandas/_libs/tslib.pyi @@ -11,11 +11,6 @@ def format_array_from_datetime( na_rep: str | float = ..., reso: int = ..., # NPY_DATETIMEUNIT ) -> npt.NDArray[np.object_]: ... -def array_with_unit_to_datetime( - values: npt.NDArray[np.object_], - unit: str, - errors: str = ..., -) -> tuple[np.ndarray, tzinfo | None]: ... def first_non_null(values: np.ndarray) -> int: ... def array_to_datetime( values: npt.NDArray[np.object_], @@ -24,6 +19,7 @@ def array_to_datetime( yearfirst: bool = ..., utc: bool = ..., creso: int = ..., + unit_for_numerics: str | None = ..., ) -> tuple[np.ndarray, tzinfo | None]: ... # returned ndarray may be object dtype or datetime64[ns] diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index aecf9f2e46bd47..dca3ba0ce49b31 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,7 +1,3 @@ -import warnings - -from pandas.util._exceptions import find_stack_level - cimport cython from datetime import timezone @@ -234,117 +230,6 @@ def format_array_from_datetime( return result -def array_with_unit_to_datetime( - ndarray[object] values, - str unit, - str errors="coerce" -): - """ - Convert the ndarray to datetime according to the time unit. - - This function converts an array of objects into a numpy array of - datetime64[ns]. It returns the converted array - and also returns the timezone offset - - if errors: - - raise: return converted values or raise OutOfBoundsDatetime - if out of range on the conversion or - ValueError for other conversions (e.g. 
a string) - - ignore: return non-convertible values as the same unit - - coerce: NaT for non-convertibles - - Parameters - ---------- - values : ndarray - Date-like objects to convert. - unit : str - Time unit to use during conversion. - errors : str, default 'raise' - Error behavior when parsing. - - Returns - ------- - result : ndarray of m8 values - tz : parsed timezone offset or None - """ - cdef: - Py_ssize_t i, n=len(values) - bint is_coerce = errors == "coerce" - bint is_raise = errors == "raise" - ndarray[int64_t] iresult - tzinfo tz = None - double fval - - assert is_coerce or is_raise - - if unit == "ns": - result, tz = array_to_datetime( - values.astype(object, copy=False), - errors=errors, - creso=NPY_FR_ns, - ) - return result, tz - - result = np.empty(n, dtype="M8[ns]") - iresult = result.view("i8") - - for i in range(n): - val = values[i] - - try: - if checknull_with_nat_and_na(val): - iresult[i] = NPY_NAT - - elif is_integer_object(val) or is_float_object(val): - - if val != val or val == NPY_NAT: - iresult[i] = NPY_NAT - else: - iresult[i] = cast_from_unit(val, unit) - - elif isinstance(val, str): - if len(val) == 0 or val in nat_strings: - iresult[i] = NPY_NAT - - else: - - try: - fval = float(val) - except ValueError: - raise ValueError( - f"non convertible value {val} with the unit '{unit}'" - ) - warnings.warn( - "The behavior of 'to_datetime' with 'unit' when parsing " - "strings is deprecated. In a future version, strings will " - "be parsed as datetime strings, matching the behavior " - "without a 'unit'. To retain the old behavior, explicitly " - "cast ints or floats to numeric type before calling " - "to_datetime.", - FutureWarning, - stacklevel=find_stack_level(), - ) - - iresult[i] = cast_from_unit(fval, unit) - - else: - # TODO: makes more sense as TypeError, but that would be an - # API change. - raise ValueError( - f"unit='{unit}' not valid with non-numerical val='{val}'" - ) - - except (ValueError, TypeError) as err: - if is_raise: - err.args = (f"{err}, at position {i}",) - raise - else: - # is_coerce - iresult[i] = NPY_NAT - - return result, tz - - @cython.wraparound(False) @cython.boundscheck(False) def first_non_null(values: ndarray) -> int: @@ -376,6 +261,7 @@ cpdef array_to_datetime( bint yearfirst=False, bint utc=False, NPY_DATETIMEUNIT creso=NPY_FR_ns, + str unit_for_numerics=None, ): """ Converts a 1D array of date-like values to a numpy array of either: @@ -404,6 +290,7 @@ cpdef array_to_datetime( indicator whether the dates should be UTC creso : NPY_DATETIMEUNIT, default NPY_FR_ns Set to NPY_FR_GENERIC to infer a resolution. 
+ unit_for_numerics : str, default "ns" + Unit with which to interpret numeric (integer and float) entries; should not be passed together with a non-default ``creso``. Returns ------- @@ -434,6 +321,13 @@ cpdef array_to_datetime( abbrev = "ns" else: abbrev = npy_unit_to_abbrev(creso) + + if unit_for_numerics is not None: + # either creso or unit_for_numerics should be passed, not both + assert creso == NPY_FR_ns + else: + unit_for_numerics = abbrev + result = np.empty((values).shape, dtype=f"M8[{abbrev}]") iresult = result.view("i8").ravel() @@ -485,7 +379,8 @@ cpdef array_to_datetime( creso = state.creso # we now need to parse this as if unit=abbrev - iresult[i] = cast_from_unit(val, abbrev, out_reso=creso) + iresult[i] = cast_from_unit(val, unit_for_numerics, out_reso=creso) + state.found_other = True elif isinstance(val, str): diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 3aacd3099c3346..543d7944e4c5d2 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -861,12 +861,19 @@ def assert_series_equal( check_names : bool, default True Whether to check the Series and Index names attribute. check_exact : bool, default False - Whether to compare number exactly. + Whether to compare numbers exactly. This also applies when checking + Index equivalence. .. versionchanged:: 2.2.0 Defaults to True for integer dtypes if none of ``check_exact``, ``rtol`` and ``atol`` are specified. + + .. versionchanged:: 3.0.0 + + ``check_exact`` for comparing the indexes defaults to ``True`` when + an :class:`Index` is of an integer dtype. + check_datetimelike_compat : bool, default False Compare datetime-like which is comparable ignoring dtype. check_categorical : bool, default True @@ -902,7 +909,6 @@ def assert_series_equal( >>> tm.assert_series_equal(a, b) """ __tracebackhide__ = True - check_exact_index = False if check_exact is lib.no_default else check_exact if ( check_exact is lib.no_default and rtol is lib.no_default @@ -914,8 +920,20 @@ def assert_series_equal( or is_numeric_dtype(right.dtype) and not is_float_dtype(right.dtype) ) + left_index_dtypes = ( + [left.index.dtype] if left.index.nlevels == 1 else left.index.dtypes + ) + right_index_dtypes = ( + [right.index.dtype] if right.index.nlevels == 1 else right.index.dtypes + ) + check_exact_index = all( + dtype.kind in "iu" for dtype in left_index_dtypes + ) or all(dtype.kind in "iu" for dtype in right_index_dtypes) elif check_exact is lib.no_default: check_exact = False + check_exact_index = False + else: + check_exact_index = check_exact rtol = rtol if rtol is not lib.no_default else 1.0e-5 atol = atol if atol is not lib.no_default else 1.0e-8 diff --git a/pandas/_typing.py b/pandas/_typing.py index 172b30c59fc13d..ef68018f2721aa 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -314,7 +314,7 @@ def readline(self) -> bytes: ... class WriteExcelBuffer(WriteBuffer[bytes], Protocol): - def truncate(self, size: int | None = ...) -> int: ... + def truncate(self, size: int | None = ..., /) -> int: ...
class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol): diff --git a/pandas/conftest.py b/pandas/conftest.py index 34489bb70575af..21100178262c88 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -157,6 +157,7 @@ def pytest_collection_modifyitems(items, config) -> None: ("SeriesGroupBy.fillna", "SeriesGroupBy.fillna is deprecated"), ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"), ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"), + ("to_pytimedelta", "The behavior of TimedeltaProperties.to_pytimedelta"), # Docstring divides by zero to show behavior difference ("missing.mask_zero_div_zero", "divide by zero encountered"), ( diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 8d6880fc2acb37..6a3cf4590568cb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -6,6 +6,7 @@ from shutil import get_terminal_size from typing import ( TYPE_CHECKING, + Callable, Literal, cast, overload, @@ -2508,6 +2509,28 @@ def equals(self, other: object) -> bool: return np.array_equal(self._codes, other._codes) return False + def _accumulate(self, name: str, skipna: bool = True, **kwargs) -> Self: + func: Callable + if name == "cummin": + func = np.minimum.accumulate + elif name == "cummax": + func = np.maximum.accumulate + else: + raise TypeError(f"Accumulation {name} not supported for {type(self)}") + self.check_for_ordered(name) + + codes = self.codes.copy() + mask = self.isna() + if func == np.minimum.accumulate: + codes[mask] = np.iinfo(codes.dtype.type).max + # no need to change codes for maximum because codes[mask] is already -1 + if not skipna: + mask = np.maximum.accumulate(mask) + + codes = func(codes) + codes[mask] = -1 + return self._simple_new(codes, dtype=self._dtype) + @classmethod def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt = 0) -> Self: from pandas.core.dtypes.concat import union_categoricals diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 8ada9d88e08bc0..ab17ae43215d2b 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -875,6 +875,11 @@ def freqstr(self) -> str | None: """ Return the frequency object as a string if it's set, otherwise None. + See Also + -------- + DatetimeIndex.inferred_freq : Returns a string representing a frequency + generated by infer_freq. + Examples -------- For DatetimeIndex: @@ -908,6 +913,11 @@ def inferred_freq(self) -> str | None: Returns None if it can't autodetect the frequency. + See Also + -------- + DatetimeIndex.freqstr : Return the frequency object as a string if it's set, + otherwise None. + Examples -------- For DatetimeIndex: @@ -1825,6 +1835,11 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]: ------ ValueError if the `freq` cannot be converted. + See Also + -------- + DatetimeIndex.floor : Perform floor operation on the data to the specified `freq`. + DatetimeIndex.snap : Snap time stamps to nearest occurring frequency. 
+ + Notes ----- If the timestamps have a timezone, {op}ing will take place relative to the diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7704c99141fc20..0f59d62339bf24 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -539,7 +539,7 @@ def _unbox_scalar(self, value) -> np.datetime64: if value is NaT: return np.datetime64(value._value, self.unit) else: - return value.as_unit(self.unit).asm8 + return value.as_unit(self.unit, round_ok=False).asm8 def _scalar_from_string(self, value) -> Timestamp | NaTType: return Timestamp(value, tz=self.tz) @@ -593,6 +593,13 @@ def tz(self) -> tzinfo | None: datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None Returns None when the array is tz-naive. + See Also + -------- + DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a + given time zone, or remove timezone from a tz-aware DatetimeIndex. + DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from + one time zone to another. + Examples -------- For Series: @@ -860,6 +867,7 @@ def tz_convert(self, tz) -> Self: Returns ------- Array or Index + Datetime Array/Index with target `tz`. Raises ------ @@ -1391,6 +1399,14 @@ def time(self) -> npt.NDArray[np.object_]: The time part of the Timestamps. + See Also + -------- + DatetimeIndex.timetz : Returns numpy array of :class:`datetime.time` + objects with timezones. The time part of the Timestamps. + DatetimeIndex.date : Returns numpy array of python :class:`datetime.date` + objects. Namely, the date part of Timestamps without time and timezone + information. + Examples -------- For Series: @@ -1428,6 +1444,12 @@ def timetz(self) -> npt.NDArray[np.object_]: The time part of the Timestamps. + See Also + -------- + DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects. + The time part of the Timestamps. + DatetimeIndex.tz : Return the timezone. + Examples -------- For Series: @@ -1462,6 +1484,14 @@ def date(self) -> npt.NDArray[np.object_]: Namely, the date part of Timestamps without time and timezone information. + See Also + -------- + DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects. + The time part of the Timestamps. + DatetimeIndex.year : The year of the datetime. + DatetimeIndex.month : The month as January=1, December=12. + DatetimeIndex.day : The day of the datetime. + Examples -------- For Series: @@ -1806,6 +1836,11 @@ def isocalendar(self) -> DataFrame: """ The ordinal day of the year. + See Also + -------- + DatetimeIndex.dayofweek : The day of the week with Monday=0, Sunday=6. + DatetimeIndex.day : The day of the datetime. + Examples -------- For Series: @@ -1836,6 +1871,12 @@ def isocalendar(self) -> DataFrame: """ The quarter of the date. + See Also + -------- + DatetimeIndex.snap : Snap time stamps to nearest occurring frequency. + DatetimeIndex.time : Returns numpy array of datetime.time objects. + The time part of the Timestamps. + Examples -------- For Series: @@ -2120,6 +2161,13 @@ def isocalendar(self) -> DataFrame: Series or ndarray Booleans indicating if dates belong to a leap year. + See Also + -------- + DatetimeIndex.is_year_end : Indicate whether the date is the + last day of the year. + DatetimeIndex.is_year_start : Indicate whether the date is the first + day of a year. + Examples -------- This method is available on Series with datetime values under @@ -2200,9 +2248,25 @@ def std( axis : int, optional Axis for the function to be applied on.
For :class:`pandas.Series` this parameter is unused and defaults to ``None``. + dtype : dtype, optional, default None + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types + it is the same as the array type. + out : ndarray, optional, default None + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. ddof : int, default 1 Degrees of Freedom. The divisor used in calculations is `N - ddof`, where `N` represents the number of elements. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. If the default + value is passed, then keepdims will not be passed through to the + std method of sub-classes of ndarray, however any non-default value + will be. If the sub-class method does not implement keepdims any + exceptions will be raised. skipna : bool, default True Exclude NA/null values. If an entire row/column is ``NA``, the result will be ``NA``. @@ -2210,6 +2274,7 @@ def std( Returns ------- Timedelta + Standard deviation over requested axis. See Also -------- diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 6eb4d234b349d7..ff43f971611362 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -322,7 +322,7 @@ def _unbox_scalar(self, value) -> np.timedelta64: if value is NaT: return np.timedelta64(value._value, self.unit) else: - return value.as_unit(self.unit).asm8 + return value.as_unit(self.unit, round_ok=False).asm8 def _scalar_from_string(self, value) -> Timedelta | NaTType: return Timedelta(value) diff --git a/pandas/core/base.py b/pandas/core/base.py index 9b1251a4ef5d8c..f535f0c55415ad 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -309,6 +309,10 @@ def transpose(self, *args, **kwargs) -> Self: doc=""" Return the transpose, which is by definition self. + See Also + -------- + Index : Immutable sequence used for indexing and alignment. + Examples -------- For Series: @@ -398,6 +402,11 @@ def item(self): ValueError If the data is not length = 1. + See Also + -------- + Index.values : Returns an array representing the data in the Index. + Series.head : Returns the first `n` rows. + Examples -------- >>> s = pd.Series([1]) @@ -419,6 +428,11 @@ def nbytes(self) -> int: """ Return the number of bytes in the underlying data. + See Also + -------- + Series.ndim : Number of dimensions of the underlying data. + Series.size : Return the number of elements in the underlying data. + Examples -------- For Series: @@ -681,6 +695,40 @@ def to_numpy( @final @property def empty(self) -> bool: + """ + Indicator whether Index is empty. + + Returns + ------- + bool + If Index is empty, return True; if not, return False. + + See Also + -------- + Index.size : Return the number of elements in the underlying data. + + Examples + -------- + >>> idx_empty = pd.Index([1, 2, 3]) + >>> idx_empty + Index([1, 2, 3], dtype='int64') + >>> idx_empty.empty + False + + >>> idx_empty = pd.Index([]) + >>> idx_empty + Index([], dtype='object') + >>> idx_empty.empty + True + + If we only have NaNs in our Index, it is not considered empty!
+ + >>> idx_empty = pd.Index([np.nan, np.nan]) + >>> idx_empty + Index([nan, nan], dtype='float64') + >>> idx_empty.empty + False + """ return not self.size @doc(op="max", oppose="min", value="largest") @@ -784,6 +832,7 @@ def tolist(self) -> list: Returns ------- list + List containing the values as Python or pandas scalars. See Also -------- @@ -1121,6 +1170,7 @@ def _memory_usage(self, deep: bool = False) -> int: Returns ------- bytes used + Returns memory usage of the values in the Index in bytes. See Also -------- diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 7d8e23abf43b6b..b7a1cb173f6599 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -45,6 +45,7 @@ _unary_math_ops = ( "sin", "cos", + "tan", "exp", "log", "expm1", diff --git a/pandas/core/construction.py b/pandas/core/construction.py index ec49340e9a5166..2718e9819cdf88 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -7,11 +7,8 @@ from __future__ import annotations -from collections.abc import Sequence from typing import ( TYPE_CHECKING, - Optional, - Union, cast, overload, ) @@ -23,17 +20,9 @@ from pandas._libs import lib from pandas._libs.tslibs import ( - Period, get_supported_dtype, is_supported_dtype, ) -from pandas._typing import ( - AnyArrayLike, - ArrayLike, - Dtype, - DtypeObj, - T, -) from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import ( @@ -46,6 +35,7 @@ maybe_promote, ) from pandas.core.dtypes.common import ( + ensure_object, is_list_like, is_object_dtype, is_string_dtype, @@ -63,11 +53,25 @@ import pandas.core.common as com if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Dtype, + DtypeObj, + T, + ) + from pandas import ( Index, Series, ) - from pandas.core.arrays.base import ExtensionArray + from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + TimedeltaArray, + ) def array( @@ -286,9 +290,7 @@ def array( ExtensionArray, FloatingArray, IntegerArray, - IntervalArray, NumpyExtensionArray, - PeriodArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype @@ -320,46 +322,58 @@ def array( return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: - inferred_dtype = lib.infer_dtype(data, skipna=True) - if inferred_dtype == "period": - period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) - return PeriodArray._from_sequence(period_data, copy=copy) - - elif inferred_dtype == "interval": - return IntervalArray(data, copy=copy) - - elif inferred_dtype.startswith("datetime"): - # datetime, datetime64 - try: - return DatetimeArray._from_sequence(data, copy=copy) - except ValueError: - # Mixture of timezones, fall back to NumpyExtensionArray - pass - - elif inferred_dtype.startswith("timedelta"): - # timedelta, timedelta64 - return TimedeltaArray._from_sequence(data, copy=copy) - - elif inferred_dtype == "string": + was_ndarray = isinstance(data, np.ndarray) + # error: Item "Sequence[object]" of "Sequence[object] | ExtensionArray | + # ndarray[Any, Any]" has no attribute "dtype" + if not was_ndarray or data.dtype == object: # type: ignore[union-attr] + result = lib.maybe_convert_objects( + ensure_object(data), + convert_non_numeric=True, + convert_to_nullable_dtype=True, + dtype_if_all_nat=None, + ) + result = ensure_wrapped_if_datetimelike(result) + if isinstance(result, np.ndarray): + if len(result) == 0 and not was_ndarray: + # e.g.
empty list + return FloatingArray._from_sequence(data, dtype="Float64") + return NumpyExtensionArray._from_sequence( + data, dtype=result.dtype, copy=copy + ) + if result is data and copy: + return result.copy() + return result + + data = cast(np.ndarray, data) + result = ensure_wrapped_if_datetimelike(data) + if result is not data: + result = cast("DatetimeArray | TimedeltaArray", result) + if copy and result.dtype == data.dtype: + return result.copy() + return result + + if data.dtype.kind in "SU": # StringArray/ArrowStringArray depending on pd.options.mode.string_storage dtype = StringDtype() cls = dtype.construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) - elif inferred_dtype == "integer": + elif data.dtype.kind in "iu": return IntegerArray._from_sequence(data, copy=copy) - elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data): - return FloatingArray._from_sequence(data, copy=copy) - elif ( - inferred_dtype in ("floating", "mixed-integer-float") - and getattr(data, "dtype", None) != np.float16 - ): + elif data.dtype.kind == "f": # GH#44715 Exclude np.float16 bc FloatingArray does not support it; # we will fall back to NumpyExtensionArray. + if data.dtype == np.float16: + return NumpyExtensionArray._from_sequence( + data, dtype=data.dtype, copy=copy + ) return FloatingArray._from_sequence(data, copy=copy) - elif inferred_dtype == "boolean": + elif data.dtype.kind == "b": return BooleanArray._from_sequence(data, dtype="boolean", copy=copy) + else: + # e.g. complex + return NumpyExtensionArray._from_sequence(data, dtype=data.dtype, copy=copy) # Pandas overrides NumPy for # 1. datetime64[ns,us,ms,s] diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 98e689528744ef..778b6bd6f3f182 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -717,6 +717,11 @@ class DatetimeTZDtype(PandasExtensionDtype): ZoneInfoNotFoundError When the requested timezone cannot be found. + See Also + -------- + numpy.datetime64 : Numpy data type for datetime. + datetime.datetime : Python datetime object. + Examples -------- >>> from zoneinfo import ZoneInfo @@ -793,6 +798,10 @@ def unit(self) -> str_type: """ The precision of the datetime data. + See Also + -------- + DatetimeTZDtype.tz : Retrieves the timezone. + Examples -------- >>> from zoneinfo import ZoneInfo @@ -807,6 +816,10 @@ def tz(self) -> tzinfo: """ The timezone. + See Also + -------- + DatetimeTZDtype.unit : Retrieves precision of the datetime data. + Examples -------- >>> from zoneinfo import ZoneInfo diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0185ca82416176..9fbbc2c08efaa2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2685,6 +2685,16 @@ def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: This includes the `compression`, `compression_level`, `chunksize` and `version` keywords. + See Also + -------- + DataFrame.to_parquet : Write a DataFrame to the binary parquet format. + DataFrame.to_excel : Write object to an Excel sheet. + DataFrame.to_sql : Write to a sql table. + DataFrame.to_csv : Write a csv file. + DataFrame.to_json : Convert the object to a JSON string. + DataFrame.to_html : Render a DataFrame as an HTML table. + DataFrame.to_string : Convert DataFrame to a string. 
+ + Notes ----- This function writes the dataframe as a `feather file @@ -2866,6 +2876,9 @@ def to_parquet( Returns ------- bytes if no path argument is provided else None + Returns the DataFrame converted to the binary parquet format as bytes if no + path argument is provided. Returns None and writes the DataFrame to the specified + location in the Parquet format if the path argument is provided. See Also -------- @@ -4012,7 +4025,6 @@ def _get_value(self, index, col, takeable: bool = False) -> Scalar: return series._values[index] series = self._get_item(col) - engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect @@ -4023,7 +4035,7 @@ def _get_value(self, index, col, takeable: bool = False) -> Scalar: # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing - loc = engine.get_loc(index) + loc = self.index._engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: @@ -5535,6 +5547,11 @@ def pop(self, item: Hashable) -> Series: Series Series representing the item that is dropped. + See Also + -------- + DataFrame.drop: Drop specified labels from rows or columns. + DataFrame.drop_duplicates: Return DataFrame with duplicate rows removed. + Examples -------- >>> df = pd.DataFrame( @@ -7682,6 +7699,10 @@ def reorder_levels(self, order: Sequence[int | str], axis: Axis = 0) -> DataFram DataFrame DataFrame with indices or columns with reordered levels. + See Also + -------- + DataFrame.swaplevel : Swap levels i and j in a MultiIndex. + Examples -------- >>> data = { @@ -12893,6 +12914,11 @@ def isin_(x): """ The column labels of the DataFrame. + See Also + -------- + DataFrame.index: The index (row labels) of the DataFrame. + DataFrame.axes: Return a list representing the axes of the DataFrame. + Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) @@ -12921,12 +12947,12 @@ def _to_dict_of_blocks(self): Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. - Internal ONLY - only works for BlockManager + Internal ONLY. """ mgr = self._mgr return { k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) - for k, v in mgr.to_dict().items() + for k, v in mgr.to_iter_dict() } @property diff --git a/pandas/core/generic.py b/pandas/core/generic.py index dbe20066424841..121f49cb7d1cfa 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -783,6 +783,12 @@ def droplevel(self, level: IndexLabel, axis: Axis = 0) -> Self: {klass} {klass} with requested index / column level(s) removed. + See Also + -------- + DataFrame.replace : Replace values given in `to_replace` with `value`. + DataFrame.pivot : Return reshaped DataFrame organized by given + index / column values. + Examples -------- >>> df = ( @@ -1862,6 +1868,11 @@ def __iter__(self) -> Iterator: iterator Info axis as iterator. + See Also + -------- + DataFrame.items : Iterate over (column name, Series) pairs. + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples. + Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) @@ -6568,6 +6579,7 @@ def infer_objects(self, copy: bool | lib.NoDefault = lib.no_default) -> Self: Returns ------- same type as input object + Returns an object of the same type as the input object.
See Also -------- diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py index 2e6bcda520aba0..d108f840a1b4fc 100644 --- a/pandas/core/indexers/objects.py +++ b/pandas/core/indexers/objects.py @@ -53,11 +53,8 @@ class BaseIndexer: >>> from pandas.api.indexers import BaseIndexer >>> class CustomIndexer(BaseIndexer): ... def get_window_bounds(self, num_values, min_periods, center, closed, step): - ... start = np.empty(num_values, dtype=np.int64) - ... end = np.empty(num_values, dtype=np.int64) - ... for i in range(num_values): - ... start[i] = i - ... end[i] = i + self.window_size + ... start = np.arange(num_values, dtype=np.int64) + ... end = np.arange(num_values, dtype=np.int64) + self.window_size ... return start, end >>> df = pd.DataFrame({"values": range(5)}) >>> indexer = CustomIndexer(window_size=2) diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 2bb234e174563c..3dcd1fedc8d641 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -9,10 +9,12 @@ NoReturn, cast, ) +import warnings import numpy as np from pandas._libs import lib +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_integer_dtype, @@ -210,6 +212,15 @@ def _delegate_method(self, name: str, *args, **kwargs): return result def to_pytimedelta(self): + # GH 57463 + warnings.warn( + f"The behavior of {type(self).__name__}.to_pytimedelta is deprecated, " + "in a future version this will return a Series containing python " + "datetime.timedelta objects instead of an ndarray. To retain the " + "old behavior, call `np.array` on the result", + FutureWarning, + stacklevel=find_stack_level(), + ) return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta() def to_pydatetime(self) -> Series: @@ -462,6 +473,15 @@ def to_pytimedelta(self) -> np.ndarray: datetime.timedelta(days=2), datetime.timedelta(days=3), datetime.timedelta(days=4)], dtype=object) """ + # GH 57463 + warnings.warn( + f"The behavior of {type(self).__name__}.to_pytimedelta is deprecated, " + "in a future version this will return a Series containing python " + "datetime.timedelta objects instead of an ndarray. To retain the " + "old behavior, call `np.array` on the result", + FutureWarning, + stacklevel=find_stack_level(), + ) return self._get_values().to_pytimedelta() @property diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8ede401f37184c..9acab2642f6be6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -832,7 +832,8 @@ def _reset_identity(self) -> None: @final def _cleanup(self) -> None: - self._engine.clear_mapping() + if "_engine" in self._cache: + self._engine.clear_mapping() @cache_readonly def _engine( @@ -976,6 +977,10 @@ def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. + See Also + -------- + Index.inferred_type: Return a string of the type inferred from the values. + Examples -------- >>> idx = pd.Index([1, 2, 3]) @@ -1056,6 +1061,12 @@ def astype(self, dtype, copy: bool = True): Index Index with values cast to specified dtype. + See Also + -------- + Index.dtype: Return the dtype object of the underlying data. + Index.dtypes: Return the dtype object of the underlying data. + Index.convert_dtypes: Convert columns to the best possible dtypes. + Examples -------- >>> idx = pd.Index([1, 2, 3]) @@ -1251,12 +1262,19 @@ def copy( name : Label, optional Set name for new object. 
deep : bool, default False + If True, attempts to make a deep copy of the Index. + Else makes a shallow copy. Returns ------- Index Index refer to new object which is a copy of this object. + See Also + -------- + Index.delete: Make new Index with passed location(-s) deleted. + Index.drop: Make new Index with passed list of labels deleted. + Notes ----- In most cases, there should be no functional difference from using @@ -1638,6 +1656,11 @@ def name(self) -> Hashable: """ Return Index or MultiIndex name. + See Also + -------- + Index.set_names: Able to set new names partially and by level. + Index.rename: Able to set new names partially and by level. + Examples -------- >>> idx = pd.Index([1, 2, 3], name="x") @@ -2077,6 +2100,12 @@ def droplevel(self, level: IndexLabel = 0): Returns ------- Index or MultiIndex + Returns an Index or MultiIndex object, depending on the resulting index + after removing the requested level(s). + + See Also + -------- + Index.dropna : Return Index without NA/NaN values. Examples -------- @@ -2344,6 +2373,10 @@ def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. + See Also + -------- + Index.dtype : Return the dtype object of the underlying data. + Examples -------- >>> idx = pd.Index([1, 2, 3]) @@ -2423,6 +2456,12 @@ def hasnans(self) -> bool: ------- bool + See Also + -------- + Index.isna : Detect missing values. + Index.dropna : Return Index without NA/NaN values. + Index.fillna : Fill NA/NaN values with the specified value. + Examples -------- >>> s = pd.Series([1, 2, 3], index=["a", "b", None]) @@ -2556,6 +2595,7 @@ def fillna(self, value): Returns ------- Index + NA/NaN values replaced with `value`. See Also -------- @@ -2592,6 +2632,12 @@ def dropna(self, how: AnyAll = "any") -> Self: Returns ------- Index + Returns an Index object after removing NA/NaN values. + + See Also + -------- + Index.fillna : Fill NA/NaN values with the specified value. + Index.isna : Detect missing values. Examples -------- @@ -2625,6 +2671,7 @@ def unique(self, level: Hashable | None = None) -> Self: Returns ------- Index + Unique values in the index. See Also -------- @@ -2660,6 +2707,7 @@ def drop_duplicates(self, *, keep: DropKeep = "first") -> Self: Returns ------- Index + A new Index object with the duplicate values removed. See Also -------- @@ -4841,8 +4889,9 @@ def _from_join_target(self, result: np.ndarray) -> ArrayLike: def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) - # include our engine hashtable - result += self._engine.sizeof(deep=deep) + # include our engine hashtable, only if it's already cached + if "_engine" in self._cache: + result += self._engine.sizeof(deep=deep) return result @final @@ -5011,12 +5060,9 @@ def __getitem__(self, key): if not isinstance(self.dtype, ExtensionDtype): if len(key) == 0 and len(key) != len(self): - warnings.warn( - "Using a boolean indexer with length 0 on an Index with " - "length greater than 0 is deprecated and will raise in a " - "future version.", - FutureWarning, - stacklevel=find_stack_level(), + raise ValueError( + "The length of the boolean indexer cannot be 0 " + "when the Index has length greater than 0." ) result = getitem(key) @@ -5175,6 +5221,12 @@ def equals(self, other: Any) -> bool: True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. + See Also + -------- + Index.identical: Checks that object attributes and types are also equal.
+ Index.has_duplicates: Check if the Index has duplicate values. + Index.is_unique: Return whether the index has unique values. + Examples -------- >>> idx1 = pd.Index([1, 2, 3]) @@ -6067,6 +6119,10 @@ def map(self, mapper, na_action: Literal["ignore"] | None = None): If the function returns a tuple with more than one element a MultiIndex will be returned. + See Also + -------- + Index.where : Replace values where the condition is False. + Examples -------- >>> idx = pd.Index([1, 2, 3]) @@ -6349,7 +6405,10 @@ def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: Parameters ---------- label : object + The label for which to calculate the slice bound. side : {'left', 'right'} + If 'left', return the leftmost position of the given label. + If 'right', return one past the rightmost position of the given label. Returns ------- @@ -6438,6 +6497,8 @@ def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: Returns ------- tuple[int, int] + Returns a tuple of two integers representing the slice locations for the + input labels within the index. See Also -------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cefdc14145d1f7..951455b627fbdf 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -455,6 +455,13 @@ def snap(self, freq: Frequency = "S") -> DatetimeIndex: ------- DatetimeIndex + See Also + -------- + DatetimeIndex.round : Perform round operation on the data to the + specified `freq`. + DatetimeIndex.floor : Perform floor operation on the data to the + specified `freq`. + Examples -------- >>> idx = pd.DatetimeIndex( @@ -508,6 +515,8 @@ def _parsed_string_to_bounds( freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev) per = Period(parsed, freq=freq) start, end = per.start_time, per.end_time + start = start.as_unit(self.unit) + end = end.as_unit(self.unit) # GH 24076 # If an incoming date string contained a UTC offset, need to localize @@ -750,6 +759,7 @@ def indexer_between_time( Returns ------- np.ndarray[np.intp] + Index locations of values between particular times of day.
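# A short usage sketch for the get_slice_bound/slice_locs docstrings edited
# above: 'left' gives the first position of the label, 'right' gives one past
# the last, and slice_locs combines both ends (the end label is inclusive).
import pandas as pd

idx = pd.Index(list("abbd"))
print(idx.get_slice_bound("b", side="left"))   # 1
print(idx.get_slice_bound("b", side="right"))  # 3
print(idx.slice_locs(start="b", end="d"))      # (1, 4)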
See Also -------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 21ce9b759f2df3..c8e16fad00d5b7 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1391,8 +1391,9 @@ def _nbytes(self, deep: bool = False) -> int: names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes - # include our engine hashtable - result += self._engine.sizeof(deep=deep) + # include our engine hashtable, only if it's already cached + if "_engine" in self._cache: + result += self._engine.sizeof(deep=deep) return result # -------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 7be1d5d95ffdf2..28d3292a1c65bb 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -38,7 +38,10 @@ Shape, npt, ) -from pandas.errors import AbstractMethodError +from pandas.errors import ( + AbstractMethodError, + OutOfBoundsDatetime, +) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg @@ -118,6 +121,7 @@ if TYPE_CHECKING: from collections.abc import ( + Generator, Iterable, Sequence, ) @@ -385,20 +389,18 @@ def _split_op_result(self, result: ArrayLike) -> list[Block]: return [nb] @final - def _split(self) -> list[Block]: + def _split(self) -> Generator[Block, None, None]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 - new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) - new_blocks.append(nb) - return new_blocks + yield nb @final def split_and_operate(self, func, *args, **kwargs) -> list[Block]: @@ -479,7 +481,17 @@ def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block: f"{self.values.dtype}. Please report a bug at " "https://github.com/pandas-dev/pandas/issues." ) - return self.astype(new_dtype) + try: + return self.astype(new_dtype) + except OutOfBoundsDatetime as err: + # e.g. GH#56419 if self.dtype is a low-resolution dt64 and we try to + # upcast to a higher-resolution dt64, we may have entries that are + # out of bounds for the higher resolution. + # Re-raise with a more informative message. + raise OutOfBoundsDatetime( + f"Incompatible (high-resolution) value for dtype='{self.dtype}'. " + "Explicitly cast before operating." 
+ ) from err @final def convert(self) -> list[Block]: @@ -537,7 +549,9 @@ def convert_dtypes( rbs = [] for blk in blks: # Determine dtype column by column - sub_blks = [blk] if blk.ndim == 1 or self.shape[0] == 1 else blk._split() + sub_blks = ( + [blk] if blk.ndim == 1 or self.shape[0] == 1 else list(blk._split()) + ) dtypes = [ convert_dtypes( b.values, @@ -1190,8 +1204,7 @@ def putmask(self, mask, new) -> list[Block]: is_array = isinstance(new, np.ndarray) res_blocks = [] - nbs = self._split() - for i, nb in enumerate(nbs): + for i, nb in enumerate(self._split()): n = new if is_array: # we have a different value per-column @@ -1255,8 +1268,7 @@ def where(self, other, cond) -> list[Block]: is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] - nbs = self._split() - for i, nb in enumerate(nbs): + for i, nb in enumerate(self._split()): oth = other if is_array: # we have a different value per-column @@ -1698,8 +1710,7 @@ def where(self, other, cond) -> list[Block]: is_array = isinstance(orig_other, (np.ndarray, ExtensionArray)) res_blocks = [] - nbs = self._split() - for i, nb in enumerate(nbs): + for i, nb in enumerate(self._split()): n = orig_other if is_array: # we have a different value per-column @@ -1760,8 +1771,7 @@ def putmask(self, mask, new) -> list[Block]: is_array = isinstance(orig_new, (np.ndarray, ExtensionArray)) res_blocks = [] - nbs = self._split() - for i, nb in enumerate(nbs): + for i, nb in enumerate(self._split()): n = orig_new if is_array: # we have a different value per-column diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 8fda9cd23b508a..7c1bcbec1d3f23 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -92,6 +92,8 @@ ) if TYPE_CHECKING: + from collections.abc import Generator + from pandas._typing import ( ArrayLike, AxisInt, @@ -645,8 +647,7 @@ def get_bool_data(self) -> Self: new_blocks.append(blk) elif blk.is_object: - nbs = blk._split() - new_blocks.extend(nb for nb in nbs if nb.is_bool) + new_blocks.extend(nb for nb in blk._split() if nb.is_bool) return self._combine(new_blocks) @@ -1525,7 +1526,9 @@ def _insert_update_mgr_locs(self, loc) -> None: When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one. 
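# A plain-Python analogue (not from the patch itself) of the Block._split
# change above: as a generator, single-column blocks are produced on demand,
# so iterating callers such as putmask/where never hold them all at once,
# while call sites that need random access now wrap the call in list(), as
# convert_dtypes does.
def split(values):
    # Yield one single-element slice at a time instead of building a list.
    for i in range(len(values)):
        yield values[i : i + 1]

pieces = split([10, 20, 30])
print(next(pieces))   # [10] -- produced lazily
print(list(pieces))   # [[20], [30]] -- remaining items, materialized only here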
""" - for blkno, count in _fast_count_smallints(self.blknos[loc:]): + # Faster version of set(arr) for sequences of small numbers + blknos = np.bincount(self.blknos[loc:]).nonzero()[0] + for blkno in blknos: # .620 this way, .326 of which is in increment_above blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) @@ -1597,7 +1600,7 @@ def grouped_reduce(self, func: Callable) -> Self: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] - index = Index(range(nrows)) + index = default_index(nrows) return type(self).from_blocks(result_blocks, [self.axes[0], index]) @@ -1735,21 +1738,18 @@ def unstack(self, unstacker, fill_value) -> BlockManager: bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm - def to_dict(self) -> dict[str, Self]: + def to_iter_dict(self) -> Generator[tuple[str, Self], None, None]: """ - Return a dict of str(dtype) -> BlockManager + Yield a tuple of (str(dtype), BlockManager) Returns ------- - values : a dict of dtype -> BlockManager + values : a tuple of (str(dtype), BlockManager) """ - - bd: dict[str, list[Block]] = {} - for b in self.blocks: - bd.setdefault(str(b.dtype), []).append(b) - - # TODO(EA2D): the combine will be unnecessary with 2D EAs - return {dtype: self._combine(blocks) for dtype, blocks in bd.items()} + key = lambda block: str(block.dtype) + for dtype, blocks in itertools.groupby(sorted(self.blocks, key=key), key=key): + # TODO(EA2D): the combine will be unnecessary with 2D EAs + yield dtype, self._combine(list(blocks)) def as_array( self, @@ -2330,7 +2330,7 @@ def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]: def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]: - tuples = list(enumerate(arrays)) + tuples = enumerate(arrays) if not consolidate: return _tuples_to_blocks_no_consolidate(tuples, refs) @@ -2351,7 +2351,7 @@ def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list if issubclass(dtype.type, (str, bytes)): dtype = np.dtype(object) - values, placement = _stack_arrays(list(tup_block), dtype) + values, placement = _stack_arrays(tup_block, dtype) if is_dtlike: values = ensure_wrapped_if_datetimelike(values) blk = block_type(values, placement=BlockPlacement(placement), ndim=2) @@ -2450,15 +2450,6 @@ def _merge_blocks( return blocks, False -def _fast_count_smallints(arr: npt.NDArray[np.intp]): - """Faster version of set(arr) for sequences of small numbers.""" - counts = np.bincount(arr) - nz = counts.nonzero()[0] - # Note: list(zip(...) outperforms list(np.c_[nz, counts[nz]]) here, - # in one benchmark by a factor of 11 - return zip(nz, counts[nz]) - - def _preprocess_slice_or_indexer( slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool ): diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py index 57e03dedc384db..84202a4fcc8401 100644 --- a/pandas/core/methods/to_dict.py +++ b/pandas/core/methods/to_dict.py @@ -148,7 +148,7 @@ def to_dict( Return a collections.abc.MutableMapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. 
""" - if not df.columns.is_unique: + if orient != "tight" and not df.columns.is_unique: warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 9fef78d9f8c3de..039d868bccd161 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -314,7 +314,16 @@ def get_interp_index(method, index: Index) -> Index: # prior default from pandas import Index - index = Index(np.arange(len(index))) + if isinstance(index.dtype, DatetimeTZDtype) or lib.is_np_dtype( + index.dtype, "mM" + ): + # Convert datetime-like indexes to int64 + index = Index(index.view("i8")) + + elif not is_numeric_dtype(index.dtype): + # We keep behavior consistent with prior versions of pandas for + # non-numeric, non-datetime indexes + index = Index(range(len(index))) else: methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( @@ -616,6 +625,9 @@ def _interpolate_scipy_wrapper( terp = alt_methods.get(method, None) if terp is None: raise ValueError(f"Can not interpolate with method={method}.") + + # Make sure downcast is not in kwargs for alt methods + kwargs.pop("downcast", None) new_y = terp(x, y, new_x, **kwargs) return new_y diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 86d1f55f38c050..ccbe25fdae8413 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -80,6 +80,7 @@ TimedeltaIndex, timedelta_range, ) +from pandas.core.reshape.concat import concat from pandas.tseries.frequencies import ( is_subperiod, @@ -885,30 +886,59 @@ def interpolate( Freq: 500ms, dtype: float64 Internal reindexing with ``asfreq()`` prior to interpolation leads to - an interpolated timeseries on the basis the reindexed timestamps (anchors). - Since not all datapoints from original series become anchors, - it can lead to misleading interpolation results as in the following example: + an interpolated timeseries on the basis of the reindexed timestamps + (anchors). It is assured that all available datapoints from original + series become anchors, so it also works for resampling-cases that lead + to non-aligned timestamps, as in the following example: >>> series.resample("400ms").interpolate("linear") 2023-03-01 07:00:00.000 1.0 - 2023-03-01 07:00:00.400 1.2 - 2023-03-01 07:00:00.800 1.4 - 2023-03-01 07:00:01.200 1.6 - 2023-03-01 07:00:01.600 1.8 + 2023-03-01 07:00:00.400 0.2 + 2023-03-01 07:00:00.800 -0.6 + 2023-03-01 07:00:01.200 -0.4 + 2023-03-01 07:00:01.600 0.8 2023-03-01 07:00:02.000 2.0 - 2023-03-01 07:00:02.400 2.2 - 2023-03-01 07:00:02.800 2.4 - 2023-03-01 07:00:03.200 2.6 - 2023-03-01 07:00:03.600 2.8 + 2023-03-01 07:00:02.400 1.6 + 2023-03-01 07:00:02.800 1.2 + 2023-03-01 07:00:03.200 1.4 + 2023-03-01 07:00:03.600 2.2 2023-03-01 07:00:04.000 3.0 Freq: 400ms, dtype: float64 - Note that the series erroneously increases between two anchors + Note that the series correctly decreases between two anchors ``07:00:00`` and ``07:00:02``. """ assert downcast is lib.no_default # just checking coverage result = self._upsample("asfreq") - return result.interpolate( + + # If the original data has timestamps which are not aligned with the + # target timestamps, we need to add those points back to the data frame + # that is supposed to be interpolated. This does not work with + # PeriodIndex, so we skip this case. 
GH#21351 + obj = self._selected_obj + is_period_index = isinstance(obj.index, PeriodIndex) + + # Skip this step for PeriodIndex + if not is_period_index: + final_index = result.index + if isinstance(final_index, MultiIndex): + raise NotImplementedError( + "Direct interpolation of MultiIndex data frames is not " + "supported. If you tried to resample and interpolate on a " + "grouped data frame, please use:\n" + "`df.groupby(...).apply(lambda x: x.resample(...)." + "interpolate(...), include_groups=False)`" + "\ninstead, as resampling and interpolation has to be " + "performed for each group independently." + ) + + missing_data_points_index = obj.index.difference(final_index) + if len(missing_data_points_index) > 0: + result = concat( + [result, obj.loc[missing_data_points_index]] + ).sort_index() + + result_interpolated = result.interpolate( method=method, axis=axis, limit=limit, @@ -919,6 +949,18 @@ def interpolate( **kwargs, ) + # No further steps if the original data has a PeriodIndex + if is_period_index: + return result_interpolated + + # Make sure that original data points which do not align with the + # resampled index are removed + result_interpolated = result_interpolated.loc[final_index] + + # Make sure frequency indexes are preserved + result_interpolated.index = final_index + return result_interpolated + @final def asfreq(self, fill_value=None): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index a72eb8e261e659..c1920312489c99 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5359,6 +5359,8 @@ def case_when( """ Replace values where the conditions are True. + .. versionadded:: 2.2.0 + Parameters ---------- caselist : A list of tuples of conditions and expected replacements @@ -5376,8 +5378,6 @@ def case_when( must not change the input Series (though pandas doesn`t check it). - .. versionadded:: 2.2.0 - Returns ------- Series diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index df7a6cdb1ea52d..b01cdb335ec463 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -481,7 +481,7 @@ def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index: """ arg = extract_array(arg, extract_numpy=True) - # GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime + # GH#30050 pass an ndarray to tslib.array_to_datetime # because it expects an ndarray argument if isinstance(arg, IntegerArray): arr = arg.astype(f"datetime64[{unit}]") @@ -519,7 +519,12 @@ def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index: tz_parsed = None else: arg = arg.astype(object, copy=False) - arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors) + arr, tz_parsed = tslib.array_to_datetime( + arg, + utc=utc, + errors=errors, + unit_for_numerics=unit, + ) result = DatetimeIndex(arr, name=name) if not isinstance(result, DatetimeIndex): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index d7fc71d037f2df..d585c59dd55815 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -656,6 +656,12 @@ def keys(self, include: str = "pandas") -> list[str]: ------ raises ValueError if kind has an illegal value + See Also + -------- + HDFStore.info : Prints detailed information on the store. + HDFStore.get_node : Returns the node with the key. + HDFStore.get_storer : Returns the storer object for a key. + Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) @@ -853,6 +859,12 @@ def select( object Retrieved object from file. 
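# A minimal sketch of the resample().interpolate() fix above: original
# datapoints that fall between the resampled timestamps are concatenated back
# in as anchors before interpolating, then the result is trimmed to the target
# index. The series here is illustrative.
import pandas as pd

idx = pd.DatetimeIndex(["2000-01-01 00:00:00", "2000-01-01 00:00:03"])
ser = pd.Series([0.0, 3.0], index=idx)
# The 00:00:03 value is off the 2s grid; with this change it still serves as
# an interpolation anchor instead of being dropped by the internal asfreq().
print(ser.resample("2s").interpolate("time"))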
+ See Also + -------- + HDFStore.select_as_coordinates : Returns the selection as an index. + HDFStore.select_column : Returns a single column from the table. + HDFStore.select_as_multiple : Retrieves pandas objects from multiple tables. + Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) @@ -1132,12 +1144,27 @@ def put( Write DataFrame index as a column. append : bool, default False This will force Table format, append the input data to the existing. + complib : default None + This parameter is currently not accepted. + complevel : int, 0-9, default None + Specifies a compression level for data. + A value of 0 or None disables compression. + min_itemsize : int, dict, or None + Dict of columns that specify minimum str sizes. + nan_rep : str + String to use as the NaN representation. data_columns : list of columns or True, default None List of columns to create as data columns, or True to use all columns. See `here `__. encoding : str, default None Provide an encoding for strings. + errors : str, default 'strict' + The error handling scheme to use for encoding errors. + The default is 'strict' meaning that encoding errors raise a + UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors. track_times : bool, default True Parameter is propagated to 'create_table' method of 'PyTables'. If set to False it enables to have the same h5 files (same hashes) @@ -1145,6 +1172,11 @@ def put( dropna : bool, default False, optional Remove missing values. + See Also + -------- + HDFStore.info : Prints detailed information on the store. + HDFStore.get_storer : Returns the storer object for a key. + Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) @@ -1563,6 +1595,10 @@ def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]: leaves : list Names (strings) of the pandas objects contained in `path`. + See Also + -------- + HDFStore.info : Prints detailed information on the store. + Examples -------- >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) @@ -1688,17 +1724,26 @@ def info(self) -> str: Returns ------- str + A string containing the pandas class name, the filepath to the HDF5 + file, and all the object keys along with their respective DataFrame shapes. + + See Also + -------- + HDFStore.get_storer : Returns the storer object for a key. Examples -------- - >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["C", "D"]) >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP - >>> store.put("data", df) # doctest: +SKIP + >>> store.put("data1", df1) # doctest: +SKIP + >>> store.put("data2", df2) # doctest: +SKIP >>> print(store.info()) # doctest: +SKIP >>> store.close() # doctest: +SKIP File path: store.h5 - /data frame (shape->[2,2]) + /data1 frame (shape->[2,2]) + /data2 frame (shape->[2,2]) """ path = pprint_thing(self._path) output = f"{type(self)}\nFile path: {path}\n" diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 60bb45d3ac1dcd..ea5daf02b72527 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -233,6 +233,7 @@ def hist_frame( Returns ------- matplotlib.Axes or numpy.ndarray of them + Returns an AxesSubplot object or a numpy array of AxesSubplot objects.
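# Tying back to the pandas/core/methods/to_dict.py hunk earlier: with
# orient="tight", duplicate column labels are preserved, so the
# "columns will be omitted" warning no longer applies and is now skipped.
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "A"])
tight = df.to_dict(orient="tight")  # no UserWarning is emitted
print(tight["columns"])             # ['A', 'A']
print(tight["data"])                # [[1, 2], [3, 4], [5, 6]]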
See Also -------- diff --git a/pandas/tests/arrays/sparse/test_constructors.py b/pandas/tests/arrays/sparse/test_constructors.py index 012ff1da0d4313..0bf3ab77e9eed1 100644 --- a/pandas/tests/arrays/sparse/test_constructors.py +++ b/pandas/tests/arrays/sparse/test_constructors.py @@ -90,13 +90,13 @@ def test_constructor_warns_when_losing_timezone(self): dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]")) - - with tm.assert_produces_warning(UserWarning): + msg = "loses timezone information" + with tm.assert_produces_warning(UserWarning, match=msg): result = SparseArray(dti) tm.assert_sp_array_equal(result, expected) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match=msg): result = SparseArray(pd.Series(dti)) tm.assert_sp_array_equal(result, expected) diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 50dafb5dbbb062..857509e18fa8eb 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -220,6 +220,14 @@ def test_dt64_array(dtype_unit): .construct_array_type() ._from_sequence(["a", None], dtype=pd.StringDtype()), ), + ( + # numpy array with string dtype + np.array(["a", "b"], dtype=str), + None, + pd.StringDtype() + .construct_array_type() + ._from_sequence(["a", "b"], dtype=pd.StringDtype()), + ), # Boolean ( [True, None], @@ -247,6 +255,14 @@ def test_dt64_array(dtype_unit): "category", pd.Categorical([pd.Period("2000", "D"), pd.Period("2001", "D")]), ), + # Complex + ( + np.array([complex(1), complex(2)], dtype=np.complex128), + None, + NumpyExtensionArray( + np.array([complex(1), complex(2)], dtype=np.complex128) + ), + ), ], ) def test_array(data, dtype, expected): diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index cfc04b5c91354a..3d8f8d791b7632 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -661,7 +661,9 @@ def test_array_interface(self, datetime_index): assert result is expected tm.assert_numpy_array_equal(result, expected) result = np.array(arr, dtype="datetime64[ns]") - assert result is not expected + if not np_version_gt2: + # TODO: GH 57739 + assert result is not expected tm.assert_numpy_array_equal(result, expected) # to object dtype @@ -778,7 +780,7 @@ def test_to_period_2d(self, arr1d): arr2d = arr1d.reshape(1, -1) warn = None if arr1d.tz is None else UserWarning - with tm.assert_produces_warning(warn): + with tm.assert_produces_warning(warn, match="will drop timezone information"): result = arr2d.to_period("D") expected = arr1d.to_period("D").reshape(1, -1) tm.assert_period_array_equal(result, expected) @@ -976,7 +978,9 @@ def test_array_interface(self, timedelta_index): assert result is expected tm.assert_numpy_array_equal(result, expected) result = np.array(arr, dtype="timedelta64[ns]") - assert result is not expected + if not np_version_gt2: + # TODO: GH 57739 + assert result is not expected tm.assert_numpy_array_equal(result, expected) # to object dtype diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 8f14c562fa7c3e..ebbb31205e264e 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1014,7 +1014,8 @@ def test_performance_warning_for_poor_alignment( else: seen = False - with tm.assert_produces_warning(seen): + msg = "Alignment difference on axis 1 is larger than an order of 
magnitude" + with tm.assert_produces_warning(seen, match=msg): pd.eval("df + s", engine=engine, parser=parser) s = Series(np.random.default_rng(2).standard_normal(1000)) @@ -1036,7 +1037,7 @@ def test_performance_warning_for_poor_alignment( else: wrn = False - with tm.assert_produces_warning(wrn) as w: + with tm.assert_produces_warning(wrn, match=msg) as w: pd.eval("df + s", engine=engine, parser=parser) if not is_python_engine and performance_warning: @@ -1609,22 +1610,20 @@ def eval(self, *args, **kwargs): kwargs["level"] = kwargs.pop("level", 0) + 1 return pd.eval(*args, **kwargs) - @pytest.mark.skipif( - not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr" - ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") @pytest.mark.parametrize("fn", _unary_math_ops) - def test_unary_functions(self, fn): + def test_unary_functions(self, fn, engine, parser): df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) a = df.a expr = f"{fn}(a)" - got = self.eval(expr) + got = self.eval(expr, engine=engine, parser=parser) with np.errstate(all="ignore"): expect = getattr(np, fn)(a) tm.assert_series_equal(got, expect, check_names=False) @pytest.mark.parametrize("fn", _binary_math_ops) - def test_binary_functions(self, fn): + def test_binary_functions(self, fn, engine, parser): df = DataFrame( { "a": np.random.default_rng(2).standard_normal(10), @@ -1635,7 +1634,7 @@ def test_binary_functions(self, fn): b = df.b expr = f"{fn}(a, b)" - got = self.eval(expr) + got = self.eval(expr, engine=engine, parser=parser) with np.errstate(all="ignore"): expect = getattr(np, fn)(a, b) tm.assert_almost_equal(got, expect, check_names=False) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index c34c97b6e4f048..f47815ee059af1 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -797,5 +797,5 @@ def test_pandas_dtype_numpy_warning(): def test_pandas_dtype_ea_not_instance(): # GH 31356 GH 54592 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="without any arguments"): assert pandas_dtype(CategoricalDtype) == CategoricalDtype() diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index 02c827853b29dc..261f86bfb0326a 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -124,7 +124,7 @@ def test_setattr_warnings(): # this should not raise a warning df.two.not_an_index = [1, 2] - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="doesn't allow columns"): # warn when setting column to nonexistent name df.four = df.two + 2 assert df.four.sum() > df.two.sum() diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 668e7192c0e527..f4282c9c7ac3af 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -936,9 +936,9 @@ def test_maybe_convert_objects_bool_nan(self): def test_maybe_convert_objects_nullable_boolean(self): # GH50047 arr = np.array([True, False], dtype=object) - exp = np.array([True, False]) + exp = BooleanArray._from_sequence([True, False], dtype="boolean") out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) - tm.assert_numpy_array_equal(out, exp) + tm.assert_extension_array_equal(out, exp) arr = np.array([True, False, pd.NaT], dtype=object) exp = np.array([True, False, pd.NaT], dtype=object) diff --git 
a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 4b9234a9904a21..cee565d4f7c1ed 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -27,7 +27,9 @@ def test_isna_returns_copy(self, data_missing, na_func): expected = result.copy() mask = getattr(result, na_func)() if isinstance(mask.dtype, pd.SparseDtype): + # TODO: GH 57739 mask = np.array(mask) + mask.flags.writeable = True mask[:] = True tm.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 9b2251d0b7d4a1..79440b55dd5dd6 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -2861,12 +2861,16 @@ def test_dt_to_pytimedelta(): data = [timedelta(1, 2, 3), timedelta(1, 2, 4)] ser = pd.Series(data, dtype=ArrowDtype(pa.duration("ns"))) - result = ser.dt.to_pytimedelta() + msg = "The behavior of ArrowTemporalProperties.to_pytimedelta is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.dt.to_pytimedelta() expected = np.array(data, dtype=object) tm.assert_numpy_array_equal(result, expected) assert all(type(res) is timedelta for res in result) - expected = ser.astype("timedelta64[ns]").dt.to_pytimedelta() + msg = "The behavior of TimedeltaProperties.to_pytimedelta is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = ser.astype("timedelta64[ns]").dt.to_pytimedelta() tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 5a6fe07aa007b0..69e6228d6efde1 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -145,7 +145,7 @@ def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_fram # we are producing a warning that since the passed boolean # key is not the same as the given index, we will reindex # not sure this is really necessary - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="will be reindexed"): indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1]) subframe_obj = datetime_frame[indexer_obj] tm.assert_frame_equal(subframe_obj, subframe) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 3f98f49cd18778..ed81e8c8b81297 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -711,7 +711,10 @@ def test_setitem_npmatrix_2d(self): df["np-array"] = a # Instantiation of `np.matrix` gives PendingDeprecationWarning - with tm.assert_produces_warning(PendingDeprecationWarning): + with tm.assert_produces_warning( + PendingDeprecationWarning, + match="matrix subclass is not the recommended way to represent matrices", + ): df["np-matrix"] = np.matrix(a) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 0a9d059736e6f1..cdb9ff8a67b6b4 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -109,7 +109,7 @@ def test_interp_basic_with_non_range_index(self, using_infer_string): else: result = df.set_index("C").interpolate() expected = df.set_index("C") - expected.loc[3, "A"] = 3 + expected.loc[3, "A"] = 2.66667 expected.loc[5, "B"] = 9 tm.assert_frame_equal(result, expected) diff 
--git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index b8631d95a63996..0272b679e85a27 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -166,7 +166,7 @@ def test_to_dict_not_unique_warning(self): # GH#16927: When converting to a dict, if a column has a non-unique name # it will be dropped, throwing a warning. df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"]) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="columns will be omitted"): df.to_dict() @pytest.mark.filterwarnings("ignore::UserWarning") @@ -513,6 +513,20 @@ def test_to_dict_masked_native_python(self): result = df.to_dict(orient="records") assert isinstance(result[0]["a"], int) + def test_to_dict_tight_no_warning_with_duplicate_column(self): + # GH#58281 + df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "A"]) + with tm.assert_produces_warning(None): + result = df.to_dict(orient="tight") + expected = { + "index": [0, 1, 2], + "columns": ["A", "A"], + "data": [[1, 2], [3, 4], [5, 6]], + "index_names": [None], + "column_names": [None], + } + assert result == expected + @pytest.mark.parametrize( "val", [Timestamp(2020, 1, 1), Timedelta(1), Period("2020"), Interval(1, 2)] diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index f463b3f94fa557..91b5f905ada228 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1097,7 +1097,7 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements): and expr.USE_NUMEXPR and switch_numexpr_min_elements == 0 ): - warn = UserWarning # "evaluating in Python space because ..." + warn = UserWarning else: msg = ( f"cannot perform __{op.__name__}__ with this " @@ -1105,17 +1105,16 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements): ) with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(warn): + with tm.assert_produces_warning(warn, match="evaluating in Python"): op(df, elem.value) elif (op, dtype) in skip: if op in [operator.add, operator.mul]: if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0: - # "evaluating in Python space because ..." 
warn = UserWarning else: warn = None - with tm.assert_produces_warning(warn): + with tm.assert_produces_warning(warn, match="evaluating in Python"): op(df, elem.value) else: diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 8ccd7b2ca83ba0..5118561f673388 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -699,7 +699,7 @@ def test_mode_sortwarning(self, using_infer_string): expected = DataFrame({"A": ["a", np.nan]}) warning = None if using_infer_string else UserWarning - with tm.assert_produces_warning(warning): + with tm.assert_produces_warning(warning, match="Unable to sort modes"): result = df.mode(dropna=False) result = result.sort_values(by="A").reset_index(drop=True) diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py index 49c6a91236db74..d57df82b2358c6 100644 --- a/pandas/tests/indexes/base_class/test_setops.py +++ b/pandas/tests/indexes/base_class/test_setops.py @@ -84,13 +84,13 @@ def test_union_sort_other_incomparable(self): # https://github.com/pandas-dev/pandas/issues/24959 idx = Index([1, pd.Timestamp("2000")]) # default (sort=None) - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="not supported between"): result = idx.union(idx[:1]) tm.assert_index_equal(result, idx) # sort=None - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="not supported between"): result = idx.union(idx[:1], sort=None) tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py index 05e9a294d74a6a..5b2cc55d6dc56a 100644 --- a/pandas/tests/indexes/datetimes/methods/test_to_period.py +++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py @@ -117,10 +117,10 @@ def test_to_period_infer(self): freq="5min", ) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): pi1 = rng.to_period("5min") - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): pi2 = rng.to_period() tm.assert_index_equal(pi1, pi2) @@ -143,8 +143,7 @@ def test_to_period_millisecond(self): ] ) - with tm.assert_produces_warning(UserWarning): - # warning that timezone info will be lost + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): period = index.to_period(freq="ms") assert 2 == len(period) assert period[0] == Period("2007-01-01 10:11:12.123Z", "ms") @@ -158,8 +157,7 @@ def test_to_period_microsecond(self): ] ) - with tm.assert_produces_warning(UserWarning): - # warning that timezone info will be lost + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): period = index.to_period(freq="us") assert 2 == len(period) assert period[0] == Period("2007-01-01 10:11:12.123456Z", "us") @@ -172,10 +170,7 @@ def test_to_period_microsecond(self): def test_to_period_tz(self, tz): ts = date_range("1/1/2000", "2/1/2000", tz=tz) - with tm.assert_produces_warning(UserWarning): - # GH#21333 warning that timezone info will be lost - # filter warning about freq deprecation - + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period()[0] expected = ts[0].to_period(ts.freq) @@ -183,8 +178,7 @@ def test_to_period_tz(self, tz): expected = date_range("1/1/2000", 
"2/1/2000").to_period() - with tm.assert_produces_warning(UserWarning): - # GH#21333 warning that timezone info will be lost + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period(ts.freq) tm.assert_index_equal(result, expected) @@ -193,7 +187,7 @@ def test_to_period_tz(self, tz): def test_to_period_tz_utc_offset_consistency(self, tz): # GH#22905 ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1") - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period()[0] expected = ts[0].to_period(ts.freq) assert result == expected diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index 9354984538c586..47f21cc7f81828 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -382,7 +382,7 @@ def test_union_sort_other_incomparable(): idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) # default, sort=None - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="are unorderable"): result = idx.union(idx[:1]) tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 3a2d04d3ffdc23..2e94961b673f87 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -71,8 +71,8 @@ def test_constructor_casting(self, index): tm.assert_contains_all(arr, new_index) tm.assert_index_equal(index, new_index) - @pytest.mark.parametrize("index", ["string"], indirect=True) - def test_constructor_copy(self, index, using_infer_string): + def test_constructor_copy(self, using_infer_string): + index = Index(list("abc"), name="name") arr = np.array(index) new_index = Index(arr, copy=True, name="name") assert isinstance(new_index, Index) @@ -481,7 +481,7 @@ def test_empty_fancy(self, index, dtype, request, using_infer_string): assert index[[]].identical(empty_index) if dtype == np.bool_: - with tm.assert_produces_warning(FutureWarning, match="is deprecated"): + with pytest.raises(ValueError, match="length of the boolean indexer"): assert index[empty_arr].identical(empty_index) else: assert index[empty_arr].identical(empty_index) @@ -1065,10 +1065,10 @@ def test_outer_join_sort(self): left_index = Index(np.random.default_rng(2).permutation(15)) right_index = date_range("2020-01-01", periods=10) - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="not supported between"): result = left_index.join(right_index, how="outer") - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="not supported between"): expected = left_index.astype(object).union(right_index.astype(object)) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 21cb0b8723d590..b544ebac43ecee 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -142,25 +142,18 @@ def test_constructor_infer_nat_dt_like( data = [ctor] data.insert(pos, nulls_fixture) - warn = None if nulls_fixture is NA: expected = Index([NA, NaT]) mark = pytest.mark.xfail(reason="Broken with np.NaT ctor; see GH 31884") request.applymarker(mark) - # GH#35942 numpy will emit a DeprecationWarning within the - # assert_index_equal calls. 
Since we can't do anything - # about it until GH#31884 is fixed, we suppress that warning. - warn = DeprecationWarning result = Index(data) - with tm.assert_produces_warning(warn): - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) result = Index(np.array(data, dtype=object)) - with tm.assert_produces_warning(warn): - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize("swap_objs", [True, False]) def test_constructor_mixed_nat_objs_infers_object(self, swap_objs): diff --git a/pandas/tests/indexes/test_old_base.py b/pandas/tests/indexes/test_old_base.py index 9b4470021cc1d0..b929616c814eee 100644 --- a/pandas/tests/indexes/test_old_base.py +++ b/pandas/tests/indexes/test_old_base.py @@ -326,6 +326,30 @@ def test_memory_usage(self, index): if index.inferred_type == "object": assert result3 > result2 + def test_memory_usage_doesnt_trigger_engine(self, index): + index._cache.clear() + assert "_engine" not in index._cache + + res_without_engine = index.memory_usage() + assert "_engine" not in index._cache + + # explicitly load and cache the engine + _ = index._engine + assert "_engine" in index._cache + + res_with_engine = index.memory_usage() + + # the empty engine doesn't affect the result even when initialized with values, + # because engine.sizeof() doesn't consider the content of engine.values + assert res_with_engine == res_without_engine + + if len(index) == 0: + assert res_without_engine == 0 + assert res_with_engine == 0 + else: + assert res_without_engine > 0 + assert res_with_engine > 0 + def test_argsort(self, index): if isinstance(index, CategoricalIndex): pytest.skip(f"{type(self).__name__} separately tested") diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 9a3471fe526c14..8fd349dacf9e94 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -882,7 +882,7 @@ def test_difference_incomparable(self, opname): b = Index([2, Timestamp("1999"), 1]) op = operator.methodcaller(opname, b) - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="not supported between"): # sort=None, the default result = op(a) expected = Index([3, Timestamp("2000"), 2, Timestamp("1999")]) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 92addeb29252a4..749e2c4a86b55d 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -347,7 +347,7 @@ def test_split(self): # GH#37799 values = np.random.default_rng(2).standard_normal((3, 4)) blk = new_block(values, placement=BlockPlacement([3, 1, 6]), ndim=2) - result = blk._split() + result = list(blk._split()) # check that we get views, not copies values[:] = -9999 @@ -1280,19 +1280,20 @@ def test_interval_can_hold_element(self, dtype, element): # `elem` to not have the same length as `arr` ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither") elem = element(ii2) - with tm.assert_produces_warning(FutureWarning): + msg = "Setting an item of incompatible dtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): self.check_series_setitem(elem, ii, False) assert not blk._can_hold_element(elem) ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)]) elem = element(ii3) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match=msg): 
self.check_series_setitem(elem, ii, False) assert not blk._can_hold_element(elem) ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)]) elem = element(ii4) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match=msg): self.check_series_setitem(elem, ii, False) assert not blk._can_hold_element(elem) @@ -1312,12 +1313,13 @@ def test_period_can_hold_element(self, element): # `elem` to not have the same length as `arr` pi2 = pi.asfreq("D")[:-1] elem = element(pi2) - with tm.assert_produces_warning(FutureWarning): + msg = "Setting an item of incompatible dtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): self.check_series_setitem(elem, pi, False) dti = pi.to_timestamp("s")[:-1] elem = element(dti) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match=msg): self.check_series_setitem(elem, pi, False) def check_can_hold_element(self, obj, elem, inplace: bool): diff --git a/pandas/tests/io/formats/test_css.py b/pandas/tests/io/formats/test_css.py index 8bf9aa4ac04d38..c4ecb48006cb19 100644 --- a/pandas/tests/io/formats/test_css.py +++ b/pandas/tests/io/formats/test_css.py @@ -38,30 +38,31 @@ def test_css_parse_normalisation(name, norm, abnorm): @pytest.mark.parametrize( - "invalid_css,remainder", + "invalid_css,remainder,msg", [ # No colon - ("hello-world", ""), - ("border-style: solid; hello-world", "border-style: solid"), + ("hello-world", "", "expected a colon"), + ("border-style: solid; hello-world", "border-style: solid", "expected a colon"), ( "border-style: solid; hello-world; font-weight: bold", "border-style: solid; font-weight: bold", + "expected a colon", ), # Unclosed string fail # Invalid size - ("font-size: blah", "font-size: 1em"), - ("font-size: 1a2b", "font-size: 1em"), - ("font-size: 1e5pt", "font-size: 1em"), - ("font-size: 1+6pt", "font-size: 1em"), - ("font-size: 1unknownunit", "font-size: 1em"), - ("font-size: 10", "font-size: 1em"), - ("font-size: 10 pt", "font-size: 1em"), + ("font-size: blah", "font-size: 1em", "Unhandled size"), + ("font-size: 1a2b", "font-size: 1em", "Unhandled size"), + ("font-size: 1e5pt", "font-size: 1em", "Unhandled size"), + ("font-size: 1+6pt", "font-size: 1em", "Unhandled size"), + ("font-size: 1unknownunit", "font-size: 1em", "Unhandled size"), + ("font-size: 10", "font-size: 1em", "Unhandled size"), + ("font-size: 10 pt", "font-size: 1em", "Unhandled size"), # Too many args - ("border-top: 1pt solid red green", "border-top: 1pt solid green"), + ("border-top: 1pt solid red green", "border-top: 1pt solid green", "Too many"), ], ) -def test_css_parse_invalid(invalid_css, remainder): - with tm.assert_produces_warning(CSSWarning): +def test_css_parse_invalid(invalid_css, remainder, msg): + with tm.assert_produces_warning(CSSWarning, match=msg): assert_same_resolution(invalid_css, remainder) @@ -120,7 +121,7 @@ def test_css_side_shorthands(shorthand, expansions): {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"}, ) - with tm.assert_produces_warning(CSSWarning): + with tm.assert_produces_warning(CSSWarning, match="Could not expand"): assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {}) diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 3b782713eed6cf..b40201b9ba1e62 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -325,7 +325,7 @@ def test_css_to_excel_bad_colors(input_color): if 
input_color is not None: expected["fill"] = {"patternType": "solid"} - with tm.assert_produces_warning(CSSWarning): + with tm.assert_produces_warning(CSSWarning, match="Unhandled color format"): convert = CSSToExcelConverter() assert expected == convert(css) diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index ec49b7644ea0e7..a0d5b3a741aafa 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -639,7 +639,7 @@ def test_warns_non_roundtrippable_names(self, idx): # GH 19130 df = DataFrame(index=idx) df.index.name = "index" - with tm.assert_produces_warning(): + with tm.assert_produces_warning(UserWarning, match="not round-trippable"): set_default_names(df) def test_timestamp_in_columns(self): diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 5f19c15817ce75..babbddafa3b49c 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -222,7 +222,7 @@ def test_excel_sep_warning(self, df): # Separator is ignored when excel=False and should produce a warning def test_copy_delim_warning(self, df): - with tm.assert_produces_warning(): + with tm.assert_produces_warning(UserWarning, match="ignores the sep argument"): df.to_clipboard(excel=False, sep="\t") # Tests that the default behavior of to_clipboard is tab diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index f5880d8a894f82..ad729d2346a3b7 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -463,7 +463,7 @@ def test_warning_missing_utf_bom(self, encoding, compression_): index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), ) with tm.ensure_clean() as path: - with tm.assert_produces_warning(UnicodeWarning): + with tm.assert_produces_warning(UnicodeWarning, match="byte order mark"): df.to_csv(path, compression=compression_, encoding=encoding) # reading should fail (otherwise we wouldn't need the warning) diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 3a58dda9e8dc47..00082be7e07e86 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -133,7 +133,7 @@ def test_compression_warning(compression_only): ) with tm.ensure_clean() as path: with icom.get_handle(path, "w", compression=compression_only) as handles: - with tm.assert_produces_warning(RuntimeWarning): + with tm.assert_produces_warning(RuntimeWarning, match="has no effect"): df.to_csv(handles.handle, compression=compression_only) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 3083fa24ba8b58..af77972d9fd26b 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2602,7 +2602,7 @@ def close(self): self.conn.close() with contextlib.closing(MockSqliteConnection(":memory:")) as conn: - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="only supports SQLAlchemy"): sql.read_sql("SELECT 1", conn) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 43c62237c6786b..d7fb3c00499655 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -189,11 +189,12 @@ def test_read_dta2(self, datapath): path2 = datapath("io", "data", "stata", "stata2_115.dta") path3 = datapath("io", "data", "stata", "stata2_117.dta") - with tm.assert_produces_warning(UserWarning): + msg = "Leaving in Stata Internal Format" + with 
tm.assert_produces_warning(UserWarning, match=msg): parsed_114 = self.read_dta(path1) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match=msg): parsed_115 = self.read_dta(path2) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match=msg): parsed_117 = self.read_dta(path3) # FIXME: don't leave commented-out # 113 is buggy due to limits of date format support in Stata @@ -478,7 +479,8 @@ def test_read_write_dta11(self, temp_file): formatted = formatted.astype(np.int32) path = temp_file - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): original.to_stata(path, convert_dates=None) written_and_read_again = self.read_dta(path) @@ -515,7 +517,8 @@ def test_read_write_dta12(self, version, temp_file): formatted = formatted.astype(np.int32) path = temp_file - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): original.to_stata(path, convert_dates=None, version=version) # should get a warning for that format. @@ -612,7 +615,8 @@ def test_numeric_column_names(self, temp_file): original.index.name = "index" path = temp_file # should get a warning for that format. - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): original.to_stata(path) written_and_read_again = self.read_dta(path) @@ -672,7 +676,7 @@ def test_large_value_conversion(self, temp_file): original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3}) original.index.name = "index" path = temp_file - with tm.assert_produces_warning(PossiblePrecisionLoss): + with tm.assert_produces_warning(PossiblePrecisionLoss, match="from int64 to"): original.to_stata(path) written_and_read_again = self.read_dta(path) @@ -687,7 +691,8 @@ def test_dates_invalid_column(self, temp_file): original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) original.index.name = "index" path = temp_file - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): original.to_stata(path, convert_dates={0: "tc"}) written_and_read_again = self.read_dta(path) @@ -1111,7 +1116,8 @@ def test_categorical_warnings_and_errors(self, temp_file): [["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"] ).astype("category") - with tm.assert_produces_warning(ValueLabelTypeMismatch): + msg = "data file created has not lost information due to duplicate labels" + with tm.assert_produces_warning(ValueLabelTypeMismatch, match=msg): original.to_stata(path) # should get a warning for mixed content @@ -1732,7 +1738,8 @@ def test_convert_strl_name_swap(self, temp_file): ) original.index.name = "index" - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): path = temp_file original.to_stata(path, convert_strl=["long", 1], version=117) reread = self.read_dta(path) @@ -1962,7 +1969,7 @@ def test_writer_118_exceptions(self, temp_file): "dtype_backend", ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))], ) - def 
test_read_write_ea_dtypes(self, dtype_backend, temp_file): + def test_read_write_ea_dtypes(self, dtype_backend, temp_file, tmp_path): df = DataFrame( { "a": [1, 2, None], @@ -1974,7 +1981,8 @@ def test_read_write_ea_dtypes(self, dtype_backend, temp_file): index=pd.Index([0, 1, 2], name="index"), ) df = df.convert_dtypes(dtype_backend=dtype_backend) - df.to_stata("test_stata.dta", version=118) + stata_path = tmp_path / "test_stata.dta" + df.to_stata(stata_path, version=118) df.to_stata(temp_file) written_and_read_again = self.read_dta(temp_file) @@ -2138,8 +2146,9 @@ def test_chunked_categorical(version, temp_file): def test_chunked_categorical_partial(datapath): dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") values = ["a", "b", "a", "b", 3.0] + msg = "series with value labels are not fully labeled" with StataReader(dta_file, chunksize=2) as reader: - with tm.assert_produces_warning(CategoricalConversionWarning): + with tm.assert_produces_warning(CategoricalConversionWarning, match=msg): for i, block in enumerate(reader): assert list(block.cats) == values[2 * i : 2 * (i + 1)] if i < 2: @@ -2147,7 +2156,7 @@ def test_chunked_categorical_partial(datapath): else: idx = pd.Index([3.0], dtype="float64") tm.assert_index_equal(block.cats.cat.categories, idx) - with tm.assert_produces_warning(CategoricalConversionWarning): + with tm.assert_produces_warning(CategoricalConversionWarning, match=msg): with StataReader(dta_file, chunksize=5) as reader: large_chunk = reader.__next__() direct = read_stata(dta_file) @@ -2303,7 +2312,8 @@ def test_non_categorical_value_label_name_conversion(temp_file): "_1__2_": {3: "three"}, } - with tm.assert_produces_warning(InvalidColumnName): + msg = "Not all pandas column names were valid Stata variable names" + with tm.assert_produces_warning(InvalidColumnName, match=msg): data.to_stata(temp_file, value_labels=value_labels) with StataReader(temp_file) as reader: diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 65c9083d9fe2b2..c30cb96fef2528 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -2001,7 +2001,7 @@ def _check(axes): plt.close("all") gs, axes = _generate_4_axes_via_gridspec() - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="sharex and sharey"): axes = df.plot(subplots=True, ax=axes, sharex=True) _check(axes) @@ -2065,7 +2065,7 @@ def _check(axes): plt.close("all") gs, axes = _generate_4_axes_via_gridspec() - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="sharex and sharey"): axes = df.plot(subplots=True, ax=axes, sharey=True) gs.tight_layout(plt.gcf()) @@ -2186,7 +2186,7 @@ def _get_horizontal_grid(): # vertical / subplots / sharex=True / sharey=True ax1, ax2 = _get_vertical_grid() - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="sharex and sharey"): axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) assert len(axes[0].lines) == 1 assert len(axes[1].lines) == 1 @@ -2202,7 +2202,7 @@ def _get_horizontal_grid(): # horizontal / subplots / sharex=True / sharey=True ax1, ax2 = _get_horizontal_grid() - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, match="sharex and sharey"): axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) assert len(axes[0].lines) == 1 assert len(axes[1].lines) 
@@ -2252,7 +2252,7 @@ def _get_boxed_grid():
         # subplots / sharex=True / sharey=True
         axes = _get_boxed_grid()
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
             axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
         for ax in axes:
             assert len(ax.lines) == 1
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index 511266d5786c54..a98f4b56ebf4df 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -335,7 +335,7 @@ def test_subplots_multiple_axes_2_dim(self, layout, exp_layout):
             np.random.default_rng(2).random((10, 4)),
             index=list(string.ascii_letters[:10]),
         )
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="layout keyword is ignored"):
             returned = df.plot(
                 subplots=True, ax=axes, layout=layout, sharex=False, sharey=False
             )
@@ -501,7 +501,7 @@ def test_df_subplots_patterns_minorticks_1st_ax_hidden(self):
             columns=list("AB"),
         )
         _, axes = plt.subplots(2, 1)
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
             axes = df.plot(subplots=True, ax=axes, sharex=True)
         for ax in axes:
             assert len(ax.lines) == 1
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index f8029a1c1ee40f..573f95eed15efc 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -129,7 +129,8 @@ def test_boxplot_legacy2_with_multi_col(self):
         df["Y"] = Series(["A"] * 10)
         # Multiple columns with an ax argument should use same figure
         fig, ax = mpl.pyplot.subplots()
-        with tm.assert_produces_warning(UserWarning):
+        msg = "the figure containing the passed axes is being cleared"
+        with tm.assert_produces_warning(UserWarning, match=msg):
             axes = df.boxplot(
                 column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
             )
@@ -607,7 +608,7 @@ def test_grouped_box_multiple_axes(self, hist_df):
         # passes multiple axes to plot, hist or boxplot
         # location should be changed if other test is added
         # which has earlier alphabetical order
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
             _, axes = mpl.pyplot.subplots(2, 2)
             df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
         _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2))
@@ -617,7 +618,7 @@ def test_grouped_box_multiple_axes_on_fig(self, hist_df):
         # GH 6970, GH 7069
         df = hist_df
         fig, axes = mpl.pyplot.subplots(2, 3)
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
             returned = df.boxplot(
                 column=["height", "weight", "category"],
                 by="gender",
@@ -630,7 +631,7 @@ def test_grouped_box_multiple_axes_on_fig(self, hist_df):
         assert returned[0].figure is fig

         # draw on second row
-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
             returned = df.groupby("classroom").boxplot(
                 column=["height", "weight", "category"], return_type="axes", ax=axes[1]
             )
@@ -647,7 +648,7 @@ def test_grouped_box_multiple_axes_ax_error(self, hist_df):
         _, axes = mpl.pyplot.subplots(2, 3)
         with pytest.raises(ValueError, match=msg):
             # pass different number of axes from required
-            with tm.assert_produces_warning(UserWarning):
+            with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
                axes = df.groupby("classroom").boxplot(ax=axes)

     def test_fontsize(self):
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 46753b668a8b0f..422ed8d4f3d2b4 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -1558,7 +1558,7 @@ def test_mode_sortwarning(self):
         expected = Series(["foo", np.nan])
         s = Series([1, "foo", "foo", np.nan, np.nan])

-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match="Unable to sort modes"):
             result = s.mode(dropna=False)
         result = result.sort_values().reset_index(drop=True)
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 9cd51b95d6efd6..3428abacd509e6 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -25,6 +25,29 @@
 from pandas.core.resample import _asfreq_compat

+@pytest.fixture(
+    params=[
+        "linear",
+        "time",
+        "index",
+        "values",
+        "nearest",
+        "zero",
+        "slinear",
+        "quadratic",
+        "cubic",
+        "barycentric",
+        "krogh",
+        "from_derivatives",
+        "piecewise_polynomial",
+        "pchip",
+        "akima",
+    ],
+)
+def all_1d_no_arg_interpolation_methods(request):
+    return request.param
+
+
 @pytest.mark.parametrize("freq", ["2D", "1h"])
 @pytest.mark.parametrize(
     "index",
@@ -91,6 +114,56 @@ def test_resample_interpolate(index):
     tm.assert_frame_equal(result, expected)

+def test_resample_interpolate_regular_sampling_off_grid(
+    all_1d_no_arg_interpolation_methods,
+):
+    pytest.importorskip("scipy")
+    # GH#21351
+    index = date_range("2000-01-01 00:01:00", periods=5, freq="2h")
+    ser = Series(np.arange(5.0), index)
+
+    method = all_1d_no_arg_interpolation_methods
+    # Resample to 1 hour sampling and interpolate with the given method
+    ser_resampled = ser.resample("1h").interpolate(method)
+
+    # Check that none of the resampled values are NaN, except the first one
+    # which lies 1 minute before the first actual data point
+    assert np.isnan(ser_resampled.iloc[0])
+    assert not ser_resampled.iloc[1:].isna().any()
+
+    if method not in ["nearest", "zero"]:
+        # Check that the resampled values are close to the expected values
+        # except for methods with known inaccuracies
+        assert np.all(
+            np.isclose(ser_resampled.values[1:], np.arange(0.5, 4.5, 0.5), rtol=1.0e-1)
+        )
+
+
+def test_resample_interpolate_irregular_sampling(all_1d_no_arg_interpolation_methods):
+    pytest.importorskip("scipy")
+    # GH#21351
+    ser = Series(
+        np.linspace(0.0, 1.0, 5),
+        index=DatetimeIndex(
+            [
+                "2000-01-01 00:00:03",
+                "2000-01-01 00:00:22",
+                "2000-01-01 00:00:24",
+                "2000-01-01 00:00:31",
+                "2000-01-01 00:00:39",
+            ]
+        ),
+    )
+
+    # Resample to 5 second sampling and interpolate with the given method
+    ser_resampled = ser.resample("5s").interpolate(all_1d_no_arg_interpolation_methods)
+
+    # Check that none of the resampled values are NaN, except the first one
+    # which lies 3 seconds before the first actual data point
+    assert np.isnan(ser_resampled.iloc[0])
+    assert not ser_resampled.iloc[1:].isna().any()
+
+
 def test_raises_on_non_datetimelike_index():
     # this is a non datetimelike index
     xp = DataFrame()
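
A condensed version of what the two new resample tests assert, runnable on its own (scipy is only needed for the non-linear methods):

    import numpy as np
    import pandas as pd

    # the index starts one minute past the hour, so hourly bins are "off grid"
    idx = pd.date_range("2000-01-01 00:01:00", periods=5, freq="2h")
    ser = pd.Series(np.arange(5.0), index=idx)

    out = ser.resample("1h").interpolate("linear")
    assert np.isnan(out.iloc[0])          # bin before the first data point
    assert not out.iloc[1:].isna().any()  # everything else is interpolated
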
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 11ad9240527d56..5f5a54c4d92a3f 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -333,26 +333,98 @@ def test_upsample_sum(method, method_args, expected_values):
     tm.assert_series_equal(result, expected)

-def test_groupby_resample_interpolate():
+@pytest.fixture
+def groupy_test_df():
+    return DataFrame(
+        {"price": [10, 11, 9], "volume": [50, 60, 50]},
+        index=date_range("01/01/2018", periods=3, freq="W"),
+    )
+
+
+def test_groupby_resample_interpolate_raises(groupy_test_df):
+    # GH 35325
+
+    # Make a copy of the test data frame that has index.name=None
+    groupy_test_df_without_index_name = groupy_test_df.copy()
+    groupy_test_df_without_index_name.index.name = None
+
+    dfs = [groupy_test_df, groupy_test_df_without_index_name]
+
+    for df in dfs:
+        msg = "DataFrameGroupBy.resample operated on the grouping columns"
+        with tm.assert_produces_warning(DeprecationWarning, match=msg):
+            with pytest.raises(
+                NotImplementedError,
+                match="Direct interpolation of MultiIndex data frames is "
+                "not supported",
+            ):
+                df.groupby("volume").resample("1D").interpolate(method="linear")
+
+
+def test_groupby_resample_interpolate_with_apply_syntax(groupy_test_df):
     # GH 35325
-    d = {"price": [10, 11, 9], "volume": [50, 60, 50]}
-    df = DataFrame(d)
+    # Make a copy of the test data frame that has index.name=None
+    groupy_test_df_without_index_name = groupy_test_df.copy()
+    groupy_test_df_without_index_name.index.name = None

-    df["week_starting"] = date_range("01/01/2018", periods=3, freq="W")
+    dfs = [groupy_test_df, groupy_test_df_without_index_name]

-    msg = "DataFrameGroupBy.resample operated on the grouping columns"
-    with tm.assert_produces_warning(DeprecationWarning, match=msg):
-        result = (
-            df.set_index("week_starting")
-            .groupby("volume")
-            .resample("1D")
-            .interpolate(method="linear")
+    for df in dfs:
+        result = df.groupby("volume").apply(
+            lambda x: x.resample("1d").interpolate(method="linear"),
+            include_groups=False,
         )
-    volume = [50] * 15 + [60]
-    week_starting = list(date_range("2018-01-07", "2018-01-21")) + [
-        Timestamp("2018-01-14")
+        volume = [50] * 15 + [60]
+        week_starting = list(date_range("2018-01-07", "2018-01-21")) + [
+            Timestamp("2018-01-14")
+        ]
+        expected_ind = pd.MultiIndex.from_arrays(
+            [volume, week_starting],
+            names=["volume", df.index.name],
+        )
+
+        expected = DataFrame(
+            data={
+                "price": [
+                    10.0,
+                    9.928571428571429,
+                    9.857142857142858,
+                    9.785714285714286,
+                    9.714285714285714,
+                    9.642857142857142,
+                    9.571428571428571,
+                    9.5,
+                    9.428571428571429,
+                    9.357142857142858,
+                    9.285714285714286,
+                    9.214285714285714,
+                    9.142857142857142,
+                    9.071428571428571,
+                    9.0,
+                    11.0,
+                ]
+            },
+            index=expected_ind,
+        )
+        tm.assert_frame_equal(result, expected)
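
The apply-based spelling these tests migrate to, in isolation (mirrors the `groupy_test_df` fixture above; `include_groups=False` avoids the deprecated implicit operation on the grouping column):

    import pandas as pd

    df = pd.DataFrame(
        {"price": [10, 11, 9], "volume": [50, 60, 50]},
        index=pd.date_range("2018-01-01", periods=3, freq="W"),
    )
    result = df.groupby("volume").apply(
        lambda g: g.resample("1D").interpolate(method="linear"),
        include_groups=False,
    )
    # result carries a (volume, date) MultiIndex and no deprecation fires
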
+
+
+def test_groupby_resample_interpolate_with_apply_syntax_off_grid(groupy_test_df):
+    """Similar to test_groupby_resample_interpolate_with_apply_syntax, but
+    with resampling that results in missing anchor points when interpolating.
+    See GH#21351."""
+    # GH#21351
+    result = groupy_test_df.groupby("volume").apply(
+        lambda x: x.resample("265h").interpolate(method="linear"), include_groups=False
+    )
+
+    volume = [50, 50, 60]
+    week_starting = [
+        Timestamp("2018-01-07"),
+        Timestamp("2018-01-18 01:00:00"),
+        Timestamp("2018-01-14"),
     ]
     expected_ind = pd.MultiIndex.from_arrays(
         [volume, week_starting],
@@ -363,24 +435,10 @@ def test_groupby_resample_interpolate():
         data={
             "price": [
                 10.0,
-                9.928571428571429,
-                9.857142857142858,
-                9.785714285714286,
-                9.714285714285714,
-                9.642857142857142,
-                9.571428571428571,
-                9.5,
-                9.428571428571429,
-                9.357142857142858,
-                9.285714285714286,
-                9.214285714285714,
-                9.142857142857142,
-                9.071428571428571,
-                9.0,
+                9.21131,
                 11.0,
-            ],
-            "volume": [50.0] * 15 + [60],
+            ]
         },
         index=expected_ind,
     )
-    tm.assert_frame_equal(result, expected)
+    tm.assert_frame_equal(result, expected, check_names=False)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 7ab8ee24bd1945..5c5c06dea0008a 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1565,11 +1565,12 @@ def test_merge_on_ints_floats_warning(self):
         B = DataFrame({"Y": [1.1, 2.5, 3.0]})
         expected = DataFrame({"X": [3], "Y": [3.0]})

-        with tm.assert_produces_warning(UserWarning):
+        msg = "the float values are not equal to their int representation"
+        with tm.assert_produces_warning(UserWarning, match=msg):
             result = A.merge(B, left_on="X", right_on="Y")
         tm.assert_frame_equal(result, expected)

-        with tm.assert_produces_warning(UserWarning):
+        with tm.assert_produces_warning(UserWarning, match=msg):
             result = B.merge(A, left_on="Y", right_on="X")
         tm.assert_frame_equal(result, expected[["Y", "X"]])
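
The int/float merge warning pinned down above, reproduced standalone (frames taken from the test itself):

    import pandas as pd
    import pandas._testing as tm

    A = pd.DataFrame({"X": [1, 2, 3]})
    B = pd.DataFrame({"Y": [1.1, 2.5, 3.0]})
    # merging int keys against float keys warns when the floats are not
    # exactly equal to their int counterparts
    msg = "the float values are not equal to their int representation"
    with tm.assert_produces_warning(UserWarning, match=msg):
        A.merge(B, left_on="X", right_on="Y")
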
diff --git a/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py b/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py
index 57f57e56201c87..be6ec7dbc24c71 100644
--- a/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py
+++ b/pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py
@@ -24,7 +24,8 @@ def test_to_pydatetime_nonzero_nano(self):
         ts = Timestamp("2011-01-01 9:00:00.123456789")

         # Warn the user of data loss (nanoseconds).
-        with tm.assert_produces_warning(UserWarning):
+        msg = "Discarding nonzero nanoseconds in conversion"
+        with tm.assert_produces_warning(UserWarning, match=msg):
             expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
             result = ts.to_pydatetime()
             assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index ea970433464fc4..79fd2850739836 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -501,8 +501,7 @@ def test_to_period_tz_warning(self):
         # GH#21333 make sure a warning is issued when timezone
         # info is lost
         ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
-        with tm.assert_produces_warning(UserWarning):
-            # warning that timezone info will be lost
+        with tm.assert_produces_warning(UserWarning, match="drop timezone information"):
             ts.to_period("D")

     def test_to_numpy_alias(self):
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
index ca2768efd5c680..ce8ea27ea1fa23 100644
--- a/pandas/tests/series/accessors/test_cat_accessor.py
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -200,6 +200,9 @@ def test_dt_accessor_api_for_categorical(self, idx):
             if func == "to_period" and getattr(idx, "tz", None) is not None:
                 # dropping TZ
                 warn_cls.append(UserWarning)
+            elif func == "to_pytimedelta":
+                # GH 57463
+                warn_cls.append(FutureWarning)
             if warn_cls:
                 warn_cls = tuple(warn_cls)
             else:
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index 5f0057ac50b472..8c60f7beb317da 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -192,7 +192,9 @@ def test_dt_namespace_accessor_timedelta(self):
         assert isinstance(result, DataFrame)
         tm.assert_index_equal(result.index, ser.index)

-        result = ser.dt.to_pytimedelta()
+        msg = "The behavior of TimedeltaProperties.to_pytimedelta is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = ser.dt.to_pytimedelta()
         assert isinstance(result, np.ndarray)
         assert result.dtype == object
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 99535f273075c1..7a2a4892f61fb5 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -1467,6 +1467,39 @@ def test_slice_key(self, obj, key, expected, warn, val, indexer_sli, is_inplace)
             raise AssertionError("xfail not relevant for this test.")

+@pytest.mark.parametrize(
+    "exp_dtype",
+    [
+        "M8[ms]",
+        "M8[ms, UTC]",
+        "m8[ms]",
+    ],
+)
+class TestCoercionDatetime64HigherReso(CoercionTest):
+    @pytest.fixture
+    def obj(self, exp_dtype):
+        idx = date_range("2011-01-01", freq="D", periods=4, unit="s")
+        if exp_dtype == "m8[ms]":
+            idx = idx - Timestamp("1970-01-01")
+            assert idx.dtype == "m8[s]"
+        elif exp_dtype == "M8[ms, UTC]":
+            idx = idx.tz_localize("UTC")
+        return Series(idx)
+
+    @pytest.fixture
+    def val(self, exp_dtype):
+        ts = Timestamp("2011-01-02 03:04:05.678").as_unit("ms")
+        if exp_dtype == "m8[ms]":
+            return ts - Timestamp("1970-01-01")
+        elif exp_dtype == "M8[ms, UTC]":
+            return ts.tz_localize("UTC")
+        return ts
+
+    @pytest.fixture
+    def warn(self):
+        return FutureWarning
+
+
 @pytest.mark.parametrize(
     "val,exp_dtype,warn",
     [
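
For context on the two accessor hunks above, the deprecation they pin down can be reproduced directly (a sketch; the message is the one asserted in test_dt_accessor.py):

    import pandas as pd
    import pandas._testing as tm

    ser = pd.Series(pd.to_timedelta(["1 day", "2 days"]))
    msg = "The behavior of TimedeltaProperties.to_pytimedelta is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        arr = ser.dt.to_pytimedelta()  # object ndarray of datetime.timedelta
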
index 75b4050c18afeb..8ed422fc118dc5 100644
--- a/pandas/tests/series/methods/test_clip.py
+++ b/pandas/tests/series/methods/test_clip.py
@@ -3,6 +3,8 @@
 import numpy as np
 import pytest

+from pandas.errors import OutOfBoundsDatetime
+
 import pandas as pd
 from pandas import (
     Series,
@@ -131,12 +133,30 @@ def test_clip_with_datetimes(self):
         )
         tm.assert_series_equal(result, expected)

-    @pytest.mark.parametrize("dtype", [object, "M8[us]"])
-    def test_clip_with_timestamps_and_oob_datetimes(self, dtype):
+    def test_clip_with_timestamps_and_oob_datetimes_object(self):
         # GH-42794
-        ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype=dtype)
+        ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype=object)

         result = ser.clip(lower=Timestamp.min, upper=Timestamp.max)
-        expected = Series([Timestamp.min, Timestamp.max], dtype=dtype)
+        expected = Series([Timestamp.min, Timestamp.max], dtype=object)
+
+        tm.assert_series_equal(result, expected)
+
+    def test_clip_with_timestamps_and_oob_datetimes_non_nano(self):
+        # GH#56410
+        dtype = "M8[us]"
+        ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype=dtype)
+
+        msg = (
+            r"Incompatible \(high-resolution\) value for dtype='datetime64\[us\]'. "
+            "Explicitly cast before operating"
+        )
+        with pytest.raises(OutOfBoundsDatetime, match=msg):
+            ser.clip(lower=Timestamp.min, upper=Timestamp.max)
+
+        lower = Timestamp.min.as_unit("us")
+        upper = Timestamp.max.as_unit("us")
+        result = ser.clip(lower=lower, upper=upper)
+        expected = Series([lower, upper], dtype=dtype)

         tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 0965d36e4827d1..592dba253532dd 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -308,12 +308,7 @@ def test_datetime64_fillna(self):
         "scalar",
         [
             False,
-            pytest.param(
-                True,
-                marks=pytest.mark.xfail(
-                    reason="GH#56410 scalar case not yet addressed"
-                ),
-            ),
+            True,
         ],
     )
     @pytest.mark.parametrize("tz", [None, "UTC"])
@@ -342,12 +337,7 @@ def test_datetime64_fillna_mismatched_reso_no_rounding(self, tz, scalar):
         "scalar",
         [
             False,
-            pytest.param(
-                True,
-                marks=pytest.mark.xfail(
-                    reason="GH#56410 scalar case not yet addressed"
-                ),
-            ),
+            True,
         ],
     )
     def test_timedelta64_fillna_mismatched_reso_no_rounding(self, scalar):
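
The non-nano clip behavior split out above, as a standalone snippet mirroring the new test: nanosecond-resolution `Timestamp.min`/`Timestamp.max` now raise against a microsecond Series, so the caller casts explicitly:

    from datetime import datetime

    import pandas as pd
    from pandas import Timestamp

    ser = pd.Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype="M8[us]")
    # the ns-resolution bounds overflow M8[us]; cast them to the same unit first
    result = ser.clip(
        lower=Timestamp.min.as_unit("us"), upper=Timestamp.max.as_unit("us")
    )
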
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 1008c2c87dc9e4..ff7f8d0b7fa728 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -94,7 +94,12 @@ def test_interpolate(self, datetime_series):
         ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)

         ts_copy = ts.copy()
-        ts_copy[5:10] = np.nan
+
+        # Set data between Tuesday and Thursday to NaN for 2 consecutive weeks.
+        # Linear interpolation should fill in the missing values correctly,
+        # as the index is equally-spaced within each week.
+        ts_copy[1:4] = np.nan
+        ts_copy[6:9] = np.nan

         linear_interp = ts_copy.interpolate(method="linear")
         tm.assert_series_equal(linear_interp, ts)
@@ -265,7 +270,7 @@ def test_nan_interpolate(self, kwargs):
     def test_nan_irregular_index(self):
         s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
         result = s.interpolate()
-        expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
+        expected = Series([1.0, 2.0, 2.6666666666666665, 4.0], index=[1, 3, 5, 9])
         tm.assert_series_equal(result, expected)

     def test_nan_str_index(self):
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 44bf3475b85a60..f0930a831e98d7 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -359,12 +359,13 @@ def test_add_list_to_masked_array_boolean(self, request):
             else None
         )
         ser = Series([True, None, False], dtype="boolean")
-        with tm.assert_produces_warning(warning):
+        msg = "operator is not supported by numexpr for the bool dtype"
+        with tm.assert_produces_warning(warning, match=msg):
             result = ser + [True, None, True]
         expected = Series([True, None, True], dtype="boolean")
         tm.assert_series_equal(result, expected)

-        with tm.assert_produces_warning(warning):
+        with tm.assert_produces_warning(warning, match=msg):
             result = [True, None, True] + ser
         tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py
index 9b7b08127a550b..a9d5486139b46b 100644
--- a/pandas/tests/series/test_cumulative.py
+++ b/pandas/tests/series/test_cumulative.py
@@ -170,6 +170,58 @@ def test_cummethods_bool_in_object_dtype(self, method, expected):
         result = getattr(ser, method)()
         tm.assert_series_equal(result, expected)

+    @pytest.mark.parametrize(
+        "method, order",
+        [
+            ["cummax", "abc"],
+            ["cummin", "cba"],
+        ],
+    )
+    def test_cummax_cummin_on_ordered_categorical(self, method, order):
+        # GH#52335
+        cat = pd.CategoricalDtype(list(order), ordered=True)
+        ser = pd.Series(
+            list("ababcab"),
+            dtype=cat,
+        )
+        result = getattr(ser, method)()
+        expected = pd.Series(
+            list("abbbccc"),
+            dtype=cat,
+        )
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "skip, exp",
+        [
+            [True, ["a", np.nan, "b", "b", "c"]],
+            [False, ["a", np.nan, np.nan, np.nan, np.nan]],
+        ],
+    )
+    @pytest.mark.parametrize(
+        "method, order",
+        [
+            ["cummax", "abc"],
+            ["cummin", "cba"],
+        ],
+    )
+    def test_cummax_cummin_ordered_categorical_nan(self, skip, exp, method, order):
+        # GH#52335
+        cat = pd.CategoricalDtype(list(order), ordered=True)
+        ser = pd.Series(
+            ["a", np.nan, "b", "a", "c"],
+            dtype=cat,
+        )
+        result = getattr(ser, method)(skipna=skip)
+        expected = pd.Series(
+            exp,
+            dtype=cat,
+        )
+        tm.assert_series_equal(
+            result,
+            expected,
+        )
+
     def test_cumprod_timedelta(self):
         # GH#48111
         ser = pd.Series([pd.Timedelta(days=1), pd.Timedelta(days=3)])
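
The ordered-categorical cummax/cummin behavior added above, condensed from the first new test:

    import pandas as pd

    cat = pd.CategoricalDtype(list("abc"), ordered=True)
    ser = pd.Series(list("ababcab"), dtype=cat)
    # the running maximum respects the category order and keeps the dtype
    result = ser.cummax()  # values: a b b b c c c
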
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 68dcc1a18eda70..8f275345a7819f 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -339,35 +339,36 @@ def test_bool_ops_warn_on_arithmetic(self, op_str, opname, monkeypatch):
             # raises TypeError
             return

+        msg = "operator is not supported by numexpr"
         with monkeypatch.context() as m:
             m.setattr(expr, "_MIN_ELEMENTS", 5)
             with option_context("compute.use_numexpr", True):
-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(df, df)
                     e = fe(df, df)
                     tm.assert_frame_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(df.a, df.b)
                     e = fe(df.a, df.b)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(df.a, True)
                     e = fe(df.a, True)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(False, df.a)
                     e = fe(False, df.a)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(False, df)
                     e = fe(False, df)
                     tm.assert_frame_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(UserWarning, match=msg):
                     r = f(df, True)
                     e = fe(df, True)
                     tm.assert_frame_equal(r, e)
diff --git a/pandas/tests/test_optional_dependency.py b/pandas/tests/test_optional_dependency.py
index 52b5f636b1254c..9127981d1845d5 100644
--- a/pandas/tests/test_optional_dependency.py
+++ b/pandas/tests/test_optional_dependency.py
@@ -42,7 +42,7 @@ def test_bad_version(monkeypatch):
     result = import_optional_dependency("fakemodule", min_version="0.8")
     assert result is module

-    with tm.assert_produces_warning(UserWarning):
+    with tm.assert_produces_warning(UserWarning, match=match):
         result = import_optional_dependency("fakemodule", errors="warn")
     assert result is None

@@ -53,7 +53,7 @@ def test_bad_version(monkeypatch):
     with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"):
         import_optional_dependency("fakemodule", min_version="1.1.0")

-    with tm.assert_produces_warning(UserWarning):
+    with tm.assert_produces_warning(UserWarning, match="Pandas requires version"):
         result = import_optional_dependency(
             "fakemodule", errors="warn", min_version="1.1.0"
         )
@@ -81,7 +81,7 @@ def test_submodule(monkeypatch):
     with pytest.raises(ImportError, match=match):
         import_optional_dependency("fakemodule.submodule")

-    with tm.assert_produces_warning(UserWarning):
+    with tm.assert_produces_warning(UserWarning, match=match):
         result = import_optional_dependency("fakemodule.submodule", errors="warn")
     assert result is None
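
How the optional-dependency warnings above are triggered, condensed from the test setup (a sketch: it registers a stand-in module and lets `min_version` force the version check):

    import sys
    import types

    import pandas._testing as tm
    from pandas.compat._optional import import_optional_dependency

    fake = types.ModuleType("fakemodule")
    fake.__version__ = "0.9.0"
    sys.modules["fakemodule"] = fake

    # errors="warn" downgrades the version failure to a UserWarning and returns None
    with tm.assert_produces_warning(UserWarning, match="Pandas requires version"):
        result = import_optional_dependency(
            "fakemodule", errors="warn", min_version="1.1.0"
        )
    assert result is None
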
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index b59dd194cac272..f4042acd05dc38 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1705,22 +1705,24 @@ def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit):
         # GH#50301
         # Match Timestamp behavior in disallowing non-round floats with
         # Y or M unit
-        warn_msg = "strings will be parsed as datetime strings"
         msg = f"Conversion of non-round float with unit={unit} is ambiguous"
         with pytest.raises(ValueError, match=msg):
             to_datetime([1.5], unit=unit, errors="raise")
         with pytest.raises(ValueError, match=msg):
             to_datetime(np.array([1.5]), unit=unit, errors="raise")
+
+        msg = r"Given date string \"1.5\" not likely a datetime, at position 0"
         with pytest.raises(ValueError, match=msg):
-            with tm.assert_produces_warning(FutureWarning, match=warn_msg):
-                to_datetime(["1.5"], unit=unit, errors="raise")
+            to_datetime(["1.5"], unit=unit, errors="raise")

         res = to_datetime([1.5], unit=unit, errors="coerce")
         expected = Index([NaT], dtype="M8[ns]")
         tm.assert_index_equal(res, expected)

-        with tm.assert_produces_warning(FutureWarning, match=warn_msg):
-            res = to_datetime(["1.5"], unit=unit, errors="coerce")
+        # In 3.0, the string "1.5" is parsed as it would be without unit,
+        # which fails. With errors="coerce" this becomes NaT.
+        res = to_datetime(["1.5"], unit=unit, errors="coerce")
+        expected = to_datetime([NaT])
         tm.assert_index_equal(res, expected)

         # round floats are OK
@@ -1735,14 +1737,6 @@ def test_unit(self, cache):
         with pytest.raises(ValueError, match=msg):
             to_datetime([1], unit="D", format="%Y%m%d", cache=cache)

-    def test_unit_str(self, cache):
-        # GH 57051
-        # Test that strs aren't dropping precision to 32-bit accidentally.
-        with tm.assert_produces_warning(FutureWarning):
-            res = to_datetime(["1704660000"], unit="s", origin="unix")
-        expected = to_datetime([1704660000], unit="s", origin="unix")
-        tm.assert_index_equal(res, expected)
-
     def test_unit_array_mixed_nans(self, cache):
         values = [11111111111111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]

@@ -1771,7 +1765,7 @@ def test_unit_array_mixed_nans_large_int(self, cache):
     def test_to_datetime_invalid_str_not_out_of_bounds_valuerror(self, cache):
         # if we have a string, then we raise a ValueError
         # and NOT an OutOfBoundsDatetime
-        msg = "non convertible value foo with the unit 's'"
+        msg = "Unknown datetime string format, unable to parse: foo, at position 0"
         with pytest.raises(ValueError, match=msg):
             to_datetime("foo", errors="raise", unit="s", cache=cache)

@@ -1906,7 +1900,13 @@ def test_to_datetime_unit_na_values(self):
     @pytest.mark.parametrize("bad_val", ["foo", 111111111])
     def test_to_datetime_unit_invalid(self, bad_val):
-        msg = f"{bad_val} with the unit 'D'"
+        if bad_val == "foo":
+            msg = (
+                "Unknown datetime string format, unable to parse: "
+                f"{bad_val}, at position 2"
+            )
+        else:
+            msg = "cannot convert input 111111111 with the unit 'D', at position 2"
         with pytest.raises(ValueError, match=msg):
             to_datetime([1, 2, bad_val], unit="D")
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 0b3bc07c174528..f75f48157aad2c 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -475,9 +475,44 @@ def test_assert_series_equal_int_tol():
     )

-def test_assert_series_equal_index_exact_default():
+@pytest.mark.parametrize(
+    "left_idx, right_idx",
+    [
+        (
+            pd.Index([0, 0.2, 0.4, 0.6, 0.8, 1]),
+            pd.Index(np.linspace(0, 1, 6)),
+        ),
+        (
+            pd.MultiIndex.from_arrays([[0, 0, 0, 0, 1, 1], [0, 0.2, 0.4, 0.6, 0.8, 1]]),
+            pd.MultiIndex.from_arrays([[0, 0, 0, 0, 1, 1], np.linspace(0, 1, 6)]),
+        ),
+        (
+            pd.MultiIndex.from_arrays(
+                [["a", "a", "a", "b", "b", "b"], [1, 2, 3, 4, 5, 10000000000001]]
+            ),
+            pd.MultiIndex.from_arrays(
+                [["a", "a", "a", "b", "b", "b"], [1, 2, 3, 4, 5, 10000000000002]]
+            ),
+        ),
+        pytest.param(
+            pd.Index([1, 2, 3, 4, 5, 10000000000001]),
+            pd.Index([1, 2, 3, 4, 5, 10000000000002]),
+            marks=pytest.mark.xfail(reason="check_exact_index defaults to True"),
+        ),
+        pytest.param(
+            pd.MultiIndex.from_arrays(
+                [[0, 0, 0, 0, 1, 1], [1, 2, 3, 4, 5, 10000000000001]]
+            ),
+            pd.MultiIndex.from_arrays(
+                [[0, 0, 0, 0, 1, 1], [1, 2, 3, 4, 5, 10000000000002]]
+            ),
+            marks=pytest.mark.xfail(reason="check_exact_index defaults to True"),
+        ),
+    ],
+)
+def test_assert_series_equal_check_exact_index_default(left_idx, right_idx):
     # GH#57067
-    ser1 = Series(np.zeros(6, dtype=int), [0, 0.2, 0.4, 0.6, 0.8, 1])
-    ser2 = Series(np.zeros(6, dtype=int), np.linspace(0, 1, 6))
+    ser1 = Series(np.zeros(6, dtype=int), left_idx)
+    ser2 = Series(np.zeros(6, dtype=int), right_idx)
     tm.assert_series_equal(ser1, ser2)
     tm.assert_frame_equal(ser1.to_frame(), ser2.to_frame())
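
The default the parametrized test above pins down: float indexes are compared with tolerance, so the rounding differences produced by `np.linspace` pass, while integer indexes stay exact. The passing case in isolation:

    import numpy as np
    import pandas as pd
    import pandas._testing as tm

    ser1 = pd.Series(np.zeros(6, dtype=int), index=pd.Index([0, 0.2, 0.4, 0.6, 0.8, 1]))
    ser2 = pd.Series(np.zeros(6, dtype=int), index=pd.Index(np.linspace(0, 1, 6)))
    tm.assert_series_equal(ser1, ser2)  # inexact float-index comparison passes
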
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index d375010aff3cc7..510a69a2ff3e4b 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -696,5 +696,7 @@ def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype):
 def test_keyword_quantile_deprecated():
     # GH #52550
     ser = Series([1, 2, 3, 4])
-    with tm.assert_produces_warning(FutureWarning):
+    with tm.assert_produces_warning(
+        FutureWarning, match="the 'quantile' keyword is deprecated, use 'q' instead"
+    ):
         ser.expanding().quantile(quantile=0.5)
diff --git a/pandas/tests/window/test_rolling_quantile.py b/pandas/tests/window/test_rolling_quantile.py
index d5a7010923563c..1604d72d4f9b14 100644
--- a/pandas/tests/window/test_rolling_quantile.py
+++ b/pandas/tests/window/test_rolling_quantile.py
@@ -178,5 +178,7 @@ def test_center_reindex_frame(frame, q):
 def test_keyword_quantile_deprecated():
     # GH #52550
     s = Series([1, 2, 3, 4])
-    with tm.assert_produces_warning(FutureWarning):
+    with tm.assert_produces_warning(
+        FutureWarning, match="the 'quantile' keyword is deprecated, use 'q' instead"
+    ):
         s.rolling(2).quantile(quantile=0.4)
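
Both window tests above pin the same keyword deprecation; for reference, the warning and its forward-compatible spelling:

    import pandas as pd
    import pandas._testing as tm

    ser = pd.Series([1, 2, 3, 4])
    msg = "the 'quantile' keyword is deprecated, use 'q' instead"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        ser.rolling(2).quantile(quantile=0.4)

    ser.rolling(2).quantile(q=0.4)  # preferred going forward
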