From 53243e8ec73ecf5035a63f426a9c703d6835e9a7 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Sat, 2 Sep 2023 14:07:16 +0200
Subject: [PATCH 01/93] REGR: drop_duplicates raising for arrow strings
 (#54913)

---
 doc/source/whatsnew/v2.1.1.rst                      | 1 +
 pandas/core/algorithms.py                           | 2 +-
 pandas/tests/series/methods/test_drop_duplicates.py | 9 +++++++++
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 8b4833f6ce043..e7bfda82494a3 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
 - Fixed regression in :func:`read_csv` when ``usecols`` is given and ``dtypes`` is a dict for ``engine="python"`` (:issue:`54868`)
 - Fixed regression in :meth:`.GroupBy.get_group` raising for ``axis=1`` (:issue:`54858`)
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
+- Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)
 - Fixed regression in :meth:`Series.value_counts` raising for numeric data if ``bins`` was specified (:issue:`54857`)
 - Fixed regression when comparing a :class:`Series` with ``datetime64`` dtype with ``None`` (:issue:`54870`)

diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 5e22b774fb880..1d74bb8b83e4e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1000,7 +1000,7 @@ def duplicated(
     duplicated : ndarray[bool]
     """
     if hasattr(values, "dtype"):
-        if isinstance(values.dtype, ArrowDtype):
+        if isinstance(values.dtype, ArrowDtype) and values.dtype.kind in "ifub":
             values = values._to_masked()  # type: ignore[union-attr]

     if isinstance(values.dtype, BaseMaskedDtype):

diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 324ab1204e16e..10b2e98586365 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -1,6 +1,7 @@
 import numpy as np
 import pytest

+import pandas as pd
 from pandas import (
     Categorical,
     Series,
@@ -256,3 +257,11 @@ def test_duplicated_arrow_dtype(self):
         result = ser.drop_duplicates()
         expected = Series([True, False, None], dtype="bool[pyarrow]")
         tm.assert_series_equal(result, expected)
+
+    def test_drop_duplicates_arrow_strings(self):
+        # GH#54904
+        pa = pytest.importorskip("pyarrow")
+        ser = Series(["a", "a"], dtype=pd.ArrowDtype(pa.string()))
+        result = ser.drop_duplicates()
+        expected = Series(["a"], dtype=pd.ArrowDtype(pa.string()))
+        tm.assert_series_equal(result, expected)
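A minimal repro of the regression fixed above -- a sketch, assuming pandas 2.1.x with pyarrow installed. Arrow string dtypes report ``kind == "O"``, so the new ``values.dtype.kind in "ifub"`` guard keeps them out of the masked-array path, which only exists for numeric and boolean Arrow types:

    import pandas as pd
    import pyarrow as pa

    ser = pd.Series(["a", "a", "b"], dtype=pd.ArrowDtype(pa.string()))
    # On 2.1.0 this raised, because duplicated() unconditionally called
    # values._to_masked() for any ArrowDtype; with the kind guard, strings
    # fall through to the generic factorize-based implementation again.
    print(ser.drop_duplicates().tolist())  # ['a', 'b']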
From 51135cec59f333b61932bde5b1b99f9c5a92d3cd Mon Sep 17 00:00:00 2001
From: Thomas Li <47963215+lithomas1@users.noreply.github.com>
Date: Sat, 2 Sep 2023 13:01:34 -0400
Subject: [PATCH 02/93] BLD: Fix race condition (#54958)

---
 pandas/_libs/meson.build | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build
index f302c649bc7bd..c0a9d1ad8ee4a 100644
--- a/pandas/_libs/meson.build
+++ b/pandas/_libs/meson.build
@@ -69,7 +69,8 @@ libs_sources = {
     'index': {'sources': ['index.pyx', _index_class_helper]},
     'indexing': {'sources': ['indexing.pyx']},
     'internals': {'sources': ['internals.pyx']},
-    'interval': {'sources': ['interval.pyx', _intervaltree_helper]},
+    'interval': {'sources': ['interval.pyx', _intervaltree_helper],
+                 'deps': _khash_primitive_helper_dep},
     'join': {'sources': ['join.pyx', _khash_primitive_helper],
              'deps': _khash_primitive_helper_dep},
     'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']},

From 1e72b2be10a8822bc6ad8949d5a41f8e9cdfb3ed Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Sat, 2 Sep 2023 20:14:04 +0200
Subject: [PATCH 03/93] REGR: read_csv splitting on comma with
 delim_whitespace (#54954)

---
 doc/source/whatsnew/v2.1.1.rst        |  1 +
 pandas/_libs/src/parser/tokenizer.c   |  3 ++-
 pandas/tests/io/parser/test_header.py | 26 ++++++++++++++++++++++
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index e7bfda82494a3..d0882bdf094ad 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -15,6 +15,7 @@ Fixed regressions
 ~~~~~~~~~~~~~~~~~
 - Fixed regression in :func:`merge` when merging over a PyArrow string index (:issue:`54894`)
 - Fixed regression in :func:`read_csv` when ``usecols`` is given and ``dtypes`` is a dict for ``engine="python"`` (:issue:`54868`)
+- Fixed regression in :func:`read_csv` when ``delim_whitespace`` is True (:issue:`54918`, :issue:`54931`)
 - Fixed regression in :meth:`.GroupBy.get_group` raising for ``axis=1`` (:issue:`54858`)
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
 - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)

diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index abd3fb9e1fef3..ce8a38df172ef 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -664,7 +664,8 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes,
   ((!self->delim_whitespace && c == ' ' && self->skipinitialspace))

 // applied when in a field
-#define IS_DELIMITER(c) ((c == delimiter) || (delim_whitespace && isblank(c)))
+#define IS_DELIMITER(c)                                                        \
+  ((!delim_whitespace && c == delimiter) || (delim_whitespace && isblank(c)))

 #define _TOKEN_CLEANUP()                                                       \
   self->stream_len = slen;                                                     \

diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py
index 5cb54bb4e2916..d72174c40478e 100644
--- a/pandas/tests/io/parser/test_header.py
+++ b/pandas/tests/io/parser/test_header.py
@@ -658,3 +658,29 @@ def test_header_missing_rows(all_parsers):
     msg = r"Passed header=\[0,1,2\], len of 3, but only 2 lines in file"
     with pytest.raises(ValueError, match=msg):
         parser.read_csv(StringIO(data), header=[0, 1, 2])
+
+
+@skip_pyarrow
+def test_header_multiple_whitespaces(all_parsers):
+    # GH#54931
+    parser = all_parsers
+    data = """aa bb(1,1) cc(1,1)
+0 2 3.5"""
+
+    result = parser.read_csv(StringIO(data), sep=r"\s+")
+    expected = DataFrame({"aa": [0], "bb(1,1)": 2, "cc(1,1)": 3.5})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_header_delim_whitespace(all_parsers):
+    # GH#54918
+    parser = all_parsers
+    data = """a,b
+1,2
+3,4
+"""
+
+    result = parser.read_csv(StringIO(data), delim_whitespace=True)
+    expected = DataFrame({"a,b": ["1,2", "3,4"]})
+    tm.assert_frame_equal(result, expected)
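A short illustration of the regressions fixed above (GH 54918 / GH 54931), assuming a build that includes this patch. With the old ``IS_DELIMITER`` macro, the comma was still treated as a delimiter even when ``delim_whitespace=True``:

    from io import StringIO
    import pandas as pd

    data = "a,b\n1,2\n3,4\n"
    # Whitespace is now the only delimiter, so each whole line survives as a
    # single value and the commas are kept verbatim.
    df = pd.read_csv(StringIO(data), delim_whitespace=True)
    print(df.columns.tolist())  # ['a,b']
    print(df["a,b"].tolist())   # ['1,2', '3,4']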
From 7688d52d15e9f9502cb033c0668853e4cc33bb3c Mon Sep 17 00:00:00 2001
From: Luke Manley
Date: Sat, 2 Sep 2023 14:14:39 -0400
Subject: [PATCH 04/93] REGR: MultiIndex.append raising for overlapping
 IntervalIndex levels (#54945)

---
 doc/source/whatsnew/v2.1.1.rst             |  1 +
 pandas/core/arrays/categorical.py          |  2 +-
 pandas/tests/indexes/multi/test_reshape.py | 22 ++++++++++++++++++++++
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index d0882bdf094ad..3848353187cde 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -18,6 +18,7 @@ Fixed regressions
 - Fixed regression in :func:`read_csv` when ``delim_whitespace`` is True (:issue:`54918`, :issue:`54931`)
 - Fixed regression in :meth:`.GroupBy.get_group` raising for ``axis=1`` (:issue:`54858`)
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
+- Fixed regression in :meth:`MultiIndex.append` raising when appending overlapping :class:`IntervalIndex` levels (:issue:`54934`)
 - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)
 - Fixed regression in :meth:`Series.value_counts` raising for numeric data if ``bins`` was specified (:issue:`54857`)
 - Fixed regression when comparing a :class:`Series` with ``datetime64`` dtype with ``None`` (:issue:`54870`)

diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 9f63d1f97c54f..da4bf987d0386 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2948,7 +2948,7 @@ def recode_for_categories(
         return codes

     indexer = coerce_indexer_dtype(
-        new_categories.get_indexer(old_categories), new_categories
+        new_categories.get_indexer_for(old_categories), new_categories
    )
    new_codes = take_nd(indexer, codes, fill_value=-1)
    return new_codes

diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py
index da9838d4a2ed3..06dbb33aadf97 100644
--- a/pandas/tests/indexes/multi/test_reshape.py
+++ b/pandas/tests/indexes/multi/test_reshape.py
@@ -169,6 +169,28 @@ def test_append_names_dont_match():
     tm.assert_index_equal(result, expected)


+def test_append_overlapping_interval_levels():
+    # GH 54934
+    ivl1 = pd.IntervalIndex.from_breaks([0.0, 1.0, 2.0])
+    ivl2 = pd.IntervalIndex.from_breaks([0.5, 1.5, 2.5])
+    mi1 = MultiIndex.from_product([ivl1, ivl1])
+    mi2 = MultiIndex.from_product([ivl2, ivl2])
+    result = mi1.append(mi2)
+    expected = MultiIndex.from_tuples(
+        [
+            (pd.Interval(0.0, 1.0), pd.Interval(0.0, 1.0)),
+            (pd.Interval(0.0, 1.0), pd.Interval(1.0, 2.0)),
+            (pd.Interval(1.0, 2.0), pd.Interval(0.0, 1.0)),
+            (pd.Interval(1.0, 2.0), pd.Interval(1.0, 2.0)),
+            (pd.Interval(0.5, 1.5), pd.Interval(0.5, 1.5)),
+            (pd.Interval(0.5, 1.5), pd.Interval(1.5, 2.5)),
+            (pd.Interval(1.5, 2.5), pd.Interval(0.5, 1.5)),
+            (pd.Interval(1.5, 2.5), pd.Interval(1.5, 2.5)),
+        ]
+    )
+    tm.assert_index_equal(result, expected)
+
+
 def test_repeat():
     reps = 2
     numbers = [1, 2, 3]
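The one-line change works because ``get_indexer`` requires a unique, non-overlapping target and raises ``InvalidIndexError`` when the new categories are overlapping intervals, while ``get_indexer_for`` falls back to the non-unique path. A sketch of the call that now succeeds:

    import pandas as pd

    ivl1 = pd.IntervalIndex.from_breaks([0.0, 1.0, 2.0])
    ivl2 = pd.IntervalIndex.from_breaks([0.5, 1.5, 2.5])  # overlaps ivl1
    mi = pd.MultiIndex.from_product([ivl1, ivl1])
    # Appending recodes the interval categories of each level against their
    # (overlapping) union; this raised before the fix.
    appended = mi.append(pd.MultiIndex.from_product([ivl2, ivl2]))
    print(len(appended))  # 8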
From 153952642ccc1413a120e957814247322789a4eb Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Sat, 2 Sep 2023 20:15:51 +0200
Subject: [PATCH 05/93] Infer large_string type as pyarrow_numpy strings
 (#54826)

---
 pandas/core/arrays/string_arrow.py            |  9 +++++++++
 pandas/io/_util.py                            |  5 ++++-
 .../tests/arrays/string_/test_string_arrow.py |  8 +++++++-
 pandas/tests/io/test_parquet.py               | 19 +++++++++++++++++++
 4 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index f438f75707265..aaa515ac459bd 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -450,6 +450,15 @@ def _str_rstrip(self, to_strip=None):
 class ArrowStringArrayNumpySemantics(ArrowStringArray):
     _storage = "pyarrow_numpy"

+    def __init__(self, values) -> None:
+        _chk_pyarrow_available()
+
+        if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_large_string(
+            values.type
+        ):
+            values = pc.cast(values, pa.string())
+        super().__init__(values)
+
     @classmethod
     def _result_converter(cls, values, na=None):
         if not isna(na):

diff --git a/pandas/io/_util.py b/pandas/io/_util.py
index 915595833468d..3b2ae5daffdba 100644
--- a/pandas/io/_util.py
+++ b/pandas/io/_util.py
@@ -28,4 +28,7 @@ def _arrow_dtype_mapping() -> dict:

 def arrow_string_types_mapper() -> Callable:
     pa = import_optional_dependency("pyarrow")

-    return {pa.string(): pd.StringDtype(storage="pyarrow_numpy")}.get
+    return {
+        pa.string(): pd.StringDtype(storage="pyarrow_numpy"),
+        pa.large_string(): pd.StringDtype(storage="pyarrow_numpy"),
+    }.get

diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index 1ab628f186b47..09f9f788dc3e4 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -12,7 +12,10 @@
     StringArray,
     StringDtype,
 )
-from pandas.core.arrays.string_arrow import ArrowStringArray
+from pandas.core.arrays.string_arrow import (
+    ArrowStringArray,
+    ArrowStringArrayNumpySemantics,
+)

 skip_if_no_pyarrow = pytest.mark.skipif(
     pa_version_under7p0,
@@ -166,6 +169,9 @@ def test_pyarrow_not_installed_raises():
     with pytest.raises(ImportError, match=msg):
         ArrowStringArray([])

+    with pytest.raises(ImportError, match=msg):
+        ArrowStringArrayNumpySemantics([])
+
     with pytest.raises(ImportError, match=msg):
         ArrowStringArray._from_sequence(["a", None, "b"])

diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index e5d445d762072..db3909c147ad3 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1139,6 +1139,25 @@ def test_roundtrip_decimal(self, tmp_path, pa):
         expected = pd.DataFrame({"a": ["123"]}, dtype="string[python]")
         tm.assert_frame_equal(result, expected)

+    def test_infer_string_large_string_type(self, tmp_path, pa):
+        # GH#54798
+        import pyarrow as pa
+        import pyarrow.parquet as pq
+
+        path = tmp_path / "large_string.p"
+
+        table = pa.table({"a": pa.array([None, "b", "c"], pa.large_string())})
+        pq.write_table(table, path)
+
+        with pd.option_context("future.infer_string", True):
+            result = read_parquet(path)
+        expected = pd.DataFrame(
+            data={"a": [None, "b", "c"]},
+            dtype="string[pyarrow_numpy]",
+            columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"),
+        )
+        tm.assert_frame_equal(result, expected)
+

 class TestParquetFastParquet(Base):
     def test_basic(self, fp, df_full):
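The constructor change can be exercised directly; a sketch using the internal array class touched by this patch (an implementation detail, not public API), assuming pyarrow is installed:

    import pyarrow as pa
    from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics

    data = pa.array(["x", None, "y"], type=pa.large_string())
    # large_string values are now cast to string before being stored, so
    # construction no longer fails on large_string input.
    arr = ArrowStringArrayNumpySemantics(data)
    print(arr.dtype)  # string[pyarrow_numpy]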
From 1605bdfd40dbd4bc68ef9148792bbdec919cfc5f Mon Sep 17 00:00:00 2001
From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com>
Date: Sat, 2 Sep 2023 17:32:20 -0400
Subject: [PATCH 06/93] BUG: DataFrame.stack with future_stack=True failing
 when columns are tuples (#54962)

---
 doc/source/whatsnew/v2.1.1.rst           |  2 +-
 pandas/core/reshape/reshape.py           |  2 +-
 pandas/tests/frame/test_stack_unstack.py | 16 ++++++++++++++++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 3848353187cde..a6848dad6e3cd 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -28,7 +28,7 @@ Fixed regressions

 Bug fixes
 ~~~~~~~~~
--
+- Fixed bug in :meth:`DataFrame.stack` with ``future_stack=True`` and columns a non-:class:`MultiIndex` consisting of tuples (:issue:`54948`)

 .. ---------------------------------------------------------------------------
 .. _whatsnew_211.other:

diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index fc8d827cd31bb..bf7c7a1ee4dc7 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -908,7 +908,7 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
             data = frame.copy()
         else:
             # Take the data from frame corresponding to this idx value
-            if not isinstance(idx, tuple):
+            if len(level) == 1:
                 idx = (idx,)
             gen = iter(idx)
             column_indexer = tuple(

diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index c90b871d5d66f..dbd1f96fc17c9 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -2508,3 +2508,19 @@ def test_unstack_mixed_level_names(self):
             index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]),
         )
         tm.assert_frame_equal(result, expected)
+
+
+def test_stack_tuple_columns(future_stack):
+    # GH#54948 - test stack when the input has a non-MultiIndex with tuples
+    df = DataFrame(
+        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[("a", 1), ("a", 2), ("b", 1)]
+    )
+    result = df.stack(future_stack=future_stack)
+    expected = Series(
+        [1, 2, 3, 4, 5, 6, 7, 8, 9],
+        index=MultiIndex(
+            levels=[[0, 1, 2], [("a", 1), ("a", 2), ("b", 1)]],
+            codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
+        ),
+    )
+    tm.assert_series_equal(result, expected)
From c866a4ad6b419f33bc004513a41b3b64e0b587f6 Mon Sep 17 00:00:00 2001
From: Luke Manley
Date: Sat, 2 Sep 2023 17:33:08 -0400
Subject: [PATCH 07/93] BUG: Categorical.isin raising for overlapping
 intervals (#54951)

fix Categorical.isin raising for overlapping intervals
---
 doc/source/whatsnew/v2.2.0.rst                    | 2 +-
 pandas/core/arrays/categorical.py                 | 2 +-
 pandas/tests/indexes/categorical/test_category.py | 7 +++++++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 17abb3debe3e7..89b4d102fcf04 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -171,7 +171,7 @@ Bug fixes

 Categorical
 ^^^^^^^^^^^
--
+- :meth:`Categorical.isin` raising ``InvalidIndexError`` for categorical containing overlapping :class:`Interval` values (:issue:`34974`)
 -

 Datetimelike

diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index da4bf987d0386..8d2633c10b428 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2597,7 +2597,7 @@ def isin(self, values) -> npt.NDArray[np.bool_]:
         )
     values = sanitize_array(values, None, None)
     null_mask = np.asarray(isna(values))
-    code_values = self.categories.get_indexer(values)
+    code_values = self.categories.get_indexer_for(values)
     code_values = code_values[null_mask | (code_values >= 0)]
     return algorithms.isin(self.codes, code_values)

diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 64cbe657a8aff..87facbf529411 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -228,6 +228,13 @@ def test_isin(self):
         expected = np.array([False] * 5 + [True])
         tm.assert_numpy_array_equal(result, expected)

+    def test_isin_overlapping_intervals(self):
+        # GH 34974
+        idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)])
+        result = CategoricalIndex(idx).isin(idx)
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
     def test_identical(self):
         ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
         ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)
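The same ``get_indexer`` to ``get_indexer_for`` swap as in patch 04, this time inside ``Categorical.isin``; a minimal sketch of the behavior the new test pins down:

    import pandas as pd

    idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)])  # overlapping
    result = pd.CategoricalIndex(idx).isin(idx)
    print(result)  # [ True  True ] -- previously raised InvalidIndexError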
From 4b456e23278b2e92b13e5c2bd2a5e621a8057bd1 Mon Sep 17 00:00:00 2001
From: Francisco Alfaro
Date: Sun, 3 Sep 2023 17:42:14 -0300
Subject: [PATCH 08/93] new pandas cheat sheet formats (#54928)

* delete README.txt

add new README version

add alternative Pandas Cheat Sheets learning

* modify README.txt

* modify README.md 1.1
---
 doc/cheatsheet/README.md  | 22 ++++++++++++++++++++++
 doc/cheatsheet/README.txt |  8 --------
 2 files changed, 22 insertions(+), 8 deletions(-)
 create mode 100644 doc/cheatsheet/README.md
 delete mode 100644 doc/cheatsheet/README.txt

diff --git a/doc/cheatsheet/README.md b/doc/cheatsheet/README.md
new file mode 100644
index 0000000000000..6c33de104ed90
--- /dev/null
+++ b/doc/cheatsheet/README.md
@@ -0,0 +1,22 @@
+# Pandas Cheat Sheet
+
+The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013.
+To create the PDF version, within Powerpoint, simply do a "Save As"
+and pick "PDF" as the format.
+
+This cheat sheet, originally written by Irv Lustig, [Princeton Consultants](https://www.princetonoptimization.com/), was inspired by the [RStudio Data Wrangling Cheatsheet](https://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf).
+
+| Topic                 | PDF | PPT |
+|-----------------------|-----|-----|
+| Pandas_Cheat_Sheet    |     |     |
+| Pandas_Cheat_Sheet_JA |     |     |
+
+
+**Alternative**
+
+Alternatively, if you want to complement your learning, you can use the Pandas Cheat sheets
+developed by [DataCamp](https://www.datacamp.com/) in "PDF", "Google Colab" and "Streamlit" formats.
+
+| Topic  | PDF | Streamlit | Google Colab  |
+|--------|-----|-----------|---------------|
+| Pandas |     |           | Open In Colab |

diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt
deleted file mode 100644
index c57da38b31777..0000000000000
--- a/doc/cheatsheet/README.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013.
-To create the PDF version, within Powerpoint, simply do a "Save As"
-and pick "PDF" as the format.
-
-This cheat sheet was inspired by the RStudio Data Wrangling Cheatsheet[1], written by Irv Lustig, Princeton Consultants[2].
-
-[1]: https://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf
-[2]: https://www.princetonoptimization.com/

From 4d3b536975a285b180b5fc5f6d1b77700ea5d256 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Mon, 4 Sep 2023 11:09:41 +0200
Subject: [PATCH 09/93] REGR: roundtripping datetime through sqlite doesn't
 work (#54985)

---
 doc/source/whatsnew/v2.1.1.rst |  1 +
 pandas/io/sql.py               |  2 --
 pandas/tests/io/test_sql.py    |  7 +++++++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index a6848dad6e3cd..11b19b1508a71 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -18,6 +18,7 @@ Fixed regressions
 - Fixed regression in :func:`read_csv` when ``delim_whitespace`` is True (:issue:`54918`, :issue:`54931`)
 - Fixed regression in :meth:`.GroupBy.get_group` raising for ``axis=1`` (:issue:`54858`)
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
+- Fixed regression in :meth:`DataFrame.to_sql` not roundtripping datetime columns correctly for sqlite (:issue:`54877`)
 - Fixed regression in :meth:`MultiIndex.append` raising when appending overlapping :class:`IntervalIndex` levels (:issue:`54934`)
 - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)
 - Fixed regression in :meth:`Series.value_counts` raising for numeric data if ``bins`` was specified (:issue:`54857`)

diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 7669d5aa4cea5..2b139f8ca527c 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -2091,13 +2091,11 @@ def _adapt_time(t) -> str:

         adapt_date_iso = lambda val: val.isoformat()
         adapt_datetime_iso = lambda val: val.isoformat()
-        adapt_datetime_epoch = lambda val: int(val.timestamp())

         sqlite3.register_adapter(time, _adapt_time)

         sqlite3.register_adapter(date, adapt_date_iso)
         sqlite3.register_adapter(datetime, adapt_datetime_iso)
-        sqlite3.register_adapter(datetime, adapt_datetime_epoch)

         convert_date = lambda val: date.fromisoformat(val.decode())
         convert_datetime = lambda val: datetime.fromisoformat(val.decode())

diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 9ec0ba0b12a76..bfa93a4ff910e 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2962,6 +2962,13 @@ def test_read_sql_string_inference(self):

         tm.assert_frame_equal(result, expected)

+    def test_roundtripping_datetimes(self):
+        # GH#54877
+        df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]")
+        df.to_sql("test", self.conn, if_exists="replace", index=False)
+        result = pd.read_sql("select * from test", self.conn).iloc[0, 0]
+        assert result == "2020-12-31 12:00:00.000000"
+

 @pytest.mark.db
 class TestMySQLAlchemy(_TestSQLAlchemy):
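Why deleting the epoch adapter fixes the roundtrip: ``sqlite3`` keeps exactly one adapter per Python type, so the second ``register_adapter`` call silently replaced the ISO-format adapter and datetimes were written as integers. A standalone sketch of that stdlib behavior:

    import sqlite3
    from datetime import datetime

    sqlite3.register_adapter(datetime, lambda val: val.isoformat())
    sqlite3.register_adapter(datetime, lambda val: int(val.timestamp()))  # wins

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE t (ts TIMESTAMP)")
    con.execute("INSERT INTO t VALUES (?)", (datetime(2020, 12, 31, 12),))
    # The stored value is an epoch integer, not the ISO string that the
    # reader (datetime.fromisoformat) expects -- hence the broken roundtrip.
    print(con.execute("SELECT ts FROM t").fetchone())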
From 31d4d8b547d1872de2fc10351c3f906d68a9c48a Mon Sep 17 00:00:00 2001
From: Natalia Mokeeva <91160475+natmokval@users.noreply.github.com>
Date: Mon, 4 Sep 2023 11:52:00 +0200
Subject: [PATCH 10/93] DOC: fix an example in whatsnew/v0.15.2.rst (#54986)

fix example in whatsnew/v0.15.2.rst
---
 doc/source/whatsnew/v0.15.2.rst | 62 ++++++++++++++++++++++++++-------
 1 file changed, 49 insertions(+), 13 deletions(-)

diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst
index bb7beef449d93..acc5409b86d09 100644
--- a/doc/source/whatsnew/v0.15.2.rst
+++ b/doc/source/whatsnew/v0.15.2.rst
@@ -24,25 +24,61 @@ API changes

 - Indexing in ``MultiIndex`` beyond lex-sort depth is now supported, though a lexically sorted index will have a better performance. (:issue:`2646`)

-  .. ipython:: python
-     :okexcept:
-     :okwarning:
+  .. code-block:: ipython

-     df = pd.DataFrame({'jim':[0, 0, 1, 1],
-                        'joe':['x', 'x', 'z', 'y'],
-                        'jolie':np.random.rand(4)}).set_index(['jim', 'joe'])
-     df
-     df.index.lexsort_depth
+     In [1]: df = pd.DataFrame({'jim':[0, 0, 1, 1],
+        ...:                    'joe':['x', 'x', 'z', 'y'],
+        ...:                    'jolie':np.random.rand(4)}).set_index(['jim', 'joe'])
+        ...:
+
+     In [2]: df
+     Out[2]:
+                 jolie
+     jim joe
+     0   x    0.126970
+         x    0.966718
+     1   z    0.260476
+         y    0.897237
+
+     [4 rows x 1 columns]
+
+     In [3]: df.index.lexsort_depth
+     Out[3]: 1

      # in prior versions this would raise a KeyError
      # will now show a PerformanceWarning
-     df.loc[(1, 'z')]
+     In [4]: df.loc[(1, 'z')]
+     Out[4]:
+                 jolie
+     jim joe
+     1   z    0.260476
+
+     [1 rows x 1 columns]

      # lexically sorting
-     df2 = df.sort_index()
-     df2
-     df2.index.lexsort_depth
-     df2.loc[(1,'z')]
+     In [5]: df2 = df.sort_index()
+
+     In [6]: df2
+     Out[6]:
+                 jolie
+     jim joe
+     0   x    0.126970
+         x    0.966718
+     1   y    0.897237
+         z    0.260476
+
+     [4 rows x 1 columns]
+
+     In [7]: df2.index.lexsort_depth
+     Out[7]: 2
+
+     In [8]: df2.loc[(1,'z')]
+     Out[8]:
+                 jolie
+     jim joe
+     1   z    0.260476
+
+     [1 rows x 1 columns]

 - Bug in unique of Series with ``category`` dtype, which returned all categories regardless whether they were "used" or not (see :issue:`8559` for the discussion).
From 982d619bddbf85a905b4ec1e719275e2ab4f833d Mon Sep 17 00:00:00 2001
From: caneff
Date: Mon, 4 Sep 2023 10:31:58 -0400
Subject: [PATCH 11/93] TYP: Add typing.overload signatures to
 DataFrame/Series.interpolate (#54999)

* Add inplace overloads for interpolate

This will help our type checker work better and is a plain improvement
to the type hints.

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 pandas/core/generic.py | 45 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b9407ebe6624a..671cfc11df597 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7938,6 +7938,51 @@ def replace(
         else:
             return result.__finalize__(self, method="replace")

+    @overload
+    def interpolate(
+        self,
+        method: InterpolateOptions = ...,
+        *,
+        axis: Axis = ...,
+        limit: int | None = ...,
+        inplace: Literal[False] = ...,
+        limit_direction: Literal["forward", "backward", "both"] | None = ...,
+        limit_area: Literal["inside", "outside"] | None = ...,
+        downcast: Literal["infer"] | None | lib.NoDefault = ...,
+        **kwargs,
+    ) -> Self:
+        ...
+
+    @overload
+    def interpolate(
+        self,
+        method: InterpolateOptions = ...,
+        *,
+        axis: Axis = ...,
+        limit: int | None = ...,
+        inplace: Literal[True],
+        limit_direction: Literal["forward", "backward", "both"] | None = ...,
+        limit_area: Literal["inside", "outside"] | None = ...,
+        downcast: Literal["infer"] | None | lib.NoDefault = ...,
+        **kwargs,
+    ) -> None:
+        ...
+
+    @overload
+    def interpolate(
+        self,
+        method: InterpolateOptions = ...,
+        *,
+        axis: Axis = ...,
+        limit: int | None = ...,
+        inplace: bool_t = ...,
+        limit_direction: Literal["forward", "backward", "both"] | None = ...,
+        limit_area: Literal["inside", "outside"] | None = ...,
+        downcast: Literal["infer"] | None | lib.NoDefault = ...,
+        **kwargs,
+    ) -> Self | None:
+        ...
+
     @final
     def interpolate(
         self,

From e30e5f85c2e28aedb6273116f1e112c1ecc859f0 Mon Sep 17 00:00:00 2001
From: caneff
Date: Mon, 4 Sep 2023 13:23:33 -0400
Subject: [PATCH 12/93] TYP: Add typing.overload signatures to
 DataFrame/Series.clip (#55002)

* TYP: Add typing.overload signatures to DataFrame/Series.clip

This adds overloads so that a type checker can determine whether clip
returns a Series/DataFrame or None based on the value of the inplace
argument.

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 pandas/core/generic.py | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 671cfc11df597..e9b0c23b18373 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8652,6 +8652,42 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
             # GH 40420
         return self.where(subset, threshold, axis=axis, inplace=inplace)

+    @overload
+    def clip(
+        self,
+        lower=...,
+        upper=...,
+        *,
+        axis: Axis | None = ...,
+        inplace: Literal[False] = ...,
+        **kwargs,
+    ) -> Self:
+        ...
+
+    @overload
+    def clip(
+        self,
+        lower=...,
+        upper=...,
+        *,
+        axis: Axis | None = ...,
+        inplace: Literal[True],
+        **kwargs,
+    ) -> None:
+        ...
+
+    @overload
+    def clip(
+        self,
+        lower=...,
+        upper=...,
+        *,
+        axis: Axis | None = ...,
+        inplace: bool_t = ...,
+        **kwargs,
+    ) -> Self | None:
+        ...
+
     @final
     def clip(
         self,
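What the two overload stacks buy: a type checker can now narrow the return type on the literal value of ``inplace``, while runtime behavior is unchanged. A sketch:

    import pandas as pd

    ser = pd.Series([1.0, None, 3.0])

    out = ser.interpolate()              # checker infers Series; out is a Series
    ret = ser.interpolate(inplace=True)  # checker infers None; ret is None

    clipped = ser.clip(0.0, 2.0)         # Series
    ser.clip(0.0, 2.0, inplace=True)     # None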
From f03482094a6f17cedf1e0db3bc474ff4944a518f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 4 Sep 2023 19:52:19 -0400
Subject: [PATCH 13/93] [pre-commit.ci] pre-commit autoupdate (#55004)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.0.285 → v0.0.287](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.285...v0.0.287)
- [github.com/jendrikseipp/vulture: v2.7 → v2.9.1](https://github.com/jendrikseipp/vulture/compare/v2.7...v2.9.1)
- [github.com/pylint-dev/pylint: v3.0.0a6 → v3.0.0a7](https://github.com/pylint-dev/pylint/compare/v3.0.0a6...v3.0.0a7)
- [github.com/sphinx-contrib/sphinx-lint: v0.6.7 → v0.6.8](https://github.com/sphinx-contrib/sphinx-lint/compare/v0.6.7...v0.6.8)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml                             | 8 ++++----
 asv_bench/benchmarks/array.py                       | 2 +-
 asv_bench/benchmarks/join_merge.py                  | 4 ++--
 pandas/core/dtypes/dtypes.py                        | 2 +-
 pandas/core/indexes/api.py                          | 2 +-
 pandas/tests/frame/methods/test_copy.py             | 2 +-
 pandas/tests/frame/methods/test_reset_index.py      | 8 ++++----
 pandas/tests/frame/methods/test_sort_index.py       | 2 +-
 pandas/tests/frame/test_constructors.py             | 6 +++---
 pandas/tests/groupby/test_groupby.py                | 2 +-
 pandas/tests/groupby/test_timegrouper.py            | 2 +-
 pandas/tests/indexes/multi/test_partial_indexing.py | 2 +-
 pandas/tests/indexing/multiindex/test_getitem.py    | 2 +-
 pandas/tests/io/json/test_pandas.py                 | 2 +-
 pandas/tests/io/json/test_ujson.py                  | 2 +-
 pandas/tests/io/test_parquet.py                     | 4 ++--
 pandas/tests/io/test_stata.py                       | 2 +-
 pandas/tests/reshape/test_cut.py                    | 2 +-
 pandas/tests/reshape/test_pivot.py                  | 4 ++--
 pandas/tests/series/methods/test_reindex.py         | 4 ++--
 pandas/tests/window/test_rolling_functions.py       | 6 +++---
 21 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9f9bcd78c07b0..c01bf65818167 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
     hooks:
     - id: black
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.285
+    rev: v0.0.287
     hooks:
     - id: ruff
       args: [--exit-non-zero-on-fix]
@@ -34,7 +34,7 @@ repos:
       alias: ruff-selected-autofixes
       args: [--select, "ANN001,ANN204", --fix-only, --exit-non-zero-on-fix]
 -   repo: https://github.com/jendrikseipp/vulture
-    rev: 'v2.7'
+    rev: 'v2.9.1'
     hooks:
     - id: vulture
      entry: python scripts/run_vulture.py
@@ -84,7 +84,7 @@ repos:
         '--filter=-readability/casting,-runtime/int,-build/include_subdir,-readability/fn_size']
 -   repo: https://github.com/pylint-dev/pylint
-    rev: v3.0.0a6
+    rev: v3.0.0a7
     hooks:
     - id: pylint
       stages: [manual]
@@ -124,7 +124,7 @@ repos:
       types: [text]  # overwrite types: [rst]
       types_or: [python, rst]
 -   repo: https://github.com/sphinx-contrib/sphinx-lint
-    rev: v0.6.7
+    rev: v0.6.8
     hooks:
     - id: sphinx-lint
 -   repo: local

diff --git a/asv_bench/benchmarks/array.py b/asv_bench/benchmarks/array.py
index 09c4acc0ab309..0229cf15fbfb8 100644
--- a/asv_bench/benchmarks/array.py
+++ b/asv_bench/benchmarks/array.py
@@ -90,7 +90,7 @@ def time_setitem(self, multiple_chunks):
             self.array[i] = "foo"

     def time_setitem_list(self, multiple_chunks):
-        indexer = list(range(0, 50)) + list(range(-1000, 0, 50))
+        indexer = list(range(50)) + list(range(-1000, 0, 50))
         self.array[indexer] = ["foo"] * len(indexer)

     def time_setitem_slice(self, multiple_chunks):

diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 54bcdb0fa2843..04ac47a892a22 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -360,14 +360,14 @@ class MergeCategoricals:
     def setup(self):
         self.left_object = DataFrame(
             {
-                "X": np.random.choice(range(0, 10), size=(10000,)),
+                "X": np.random.choice(range(10), size=(10000,)),
                 "Y": np.random.choice(["one", "two", "three"], size=(10000,)),
             }
         )

         self.right_object = DataFrame(
             {
-                "X": np.random.choice(range(0, 10), size=(10000,)),
+                "X": np.random.choice(range(10), size=(10000,)),
                 "Z": np.random.choice(["jjj", "kkk", "sss"], size=(10000,)),
             }
         )

diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index f76163cbbd0a1..0589dc5b717a4 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -70,7 +70,7 @@
     from collections.abc import MutableMapping
     from datetime import tzinfo

-    import pyarrow as pa  # noqa: F811, TCH004
+    import pyarrow as pa  # noqa: TCH004

     from pandas._typing import (
         Dtype,

diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 781dfae7fef64..a8ef0e034ba9b 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -377,5 +377,5 @@ def all_indexes_same(indexes) -> bool:

 def default_index(n: int) -> RangeIndex:
-    rng = range(0, n)
+    rng = range(n)
     return RangeIndex._simple_new(rng, name=None)
diff --git a/pandas/tests/frame/methods/test_copy.py b/pandas/tests/frame/methods/test_copy.py
index 95fcaaa473067..e7901ed363106 100644
--- a/pandas/tests/frame/methods/test_copy.py
+++ b/pandas/tests/frame/methods/test_copy.py
@@ -56,7 +56,7 @@ def test_copy_consolidates(self):
             }
         )

-        for i in range(0, 10):
+        for i in range(10):
            df.loc[:, f"n_{i}"] = np.random.default_rng(2).integers(0, 100, size=55)

         assert len(df._mgr.blocks) == 11

diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index d99dd36f3a2e3..339e19254fd10 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -788,15 +788,15 @@ def test_errorreset_index_rename(float_frame):

 def test_reset_index_false_index_name():
-    result_series = Series(data=range(5, 10), index=range(0, 5))
+    result_series = Series(data=range(5, 10), index=range(5))
     result_series.index.name = False
     result_series.reset_index()
-    expected_series = Series(range(5, 10), RangeIndex(range(0, 5), name=False))
+    expected_series = Series(range(5, 10), RangeIndex(range(5), name=False))
     tm.assert_series_equal(result_series, expected_series)

     # GH 38147
-    result_frame = DataFrame(data=range(5, 10), index=range(0, 5))
+    result_frame = DataFrame(data=range(5, 10), index=range(5))
     result_frame.index.name = False
     result_frame.reset_index()
-    expected_frame = DataFrame(range(5, 10), RangeIndex(range(0, 5), name=False))
+    expected_frame = DataFrame(range(5, 10), RangeIndex(range(5), name=False))
     tm.assert_frame_equal(result_frame, expected_frame)

diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py
index 228b62a418813..985a9e3602410 100644
--- a/pandas/tests/frame/methods/test_sort_index.py
+++ b/pandas/tests/frame/methods/test_sort_index.py
@@ -911,7 +911,7 @@ def test_sort_index_multiindex_sparse_column(self):
         expected = DataFrame(
             {
                 i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0))
-                for i in range(0, 4)
+                for i in range(4)
             },
             index=MultiIndex.from_product([[1, 2], [1, 2]]),
         )

diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 3e2cde37c30eb..fd851ab244cb8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -692,12 +692,12 @@ def test_constructor_error_msgs(self):
         arr = np.array([[4, 5, 6]])
         msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)"
         with pytest.raises(ValueError, match=msg):
-            DataFrame(index=[0], columns=range(0, 4), data=arr)
+            DataFrame(index=[0], columns=range(4), data=arr)

         arr = np.array([4, 5, 6])
         msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)"
         with pytest.raises(ValueError, match=msg):
-            DataFrame(index=[0], columns=range(0, 4), data=arr)
+            DataFrame(index=[0], columns=range(4), data=arr)

         # higher dim raise exception
         with pytest.raises(ValueError, match="Must pass 2-d input"):
@@ -2391,7 +2391,7 @@ def test_construct_with_two_categoricalindex_series(self):

     def test_constructor_series_nonexact_categoricalindex(self):
         # GH 42424
-        ser = Series(range(0, 100))
+        ser = Series(range(100))
         ser1 = cut(ser, 10).value_counts().head(5)
         ser2 = cut(ser, 10).value_counts().tail(5)
         result = DataFrame({"1": ser1, "2": ser2})

diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index be226b4466f98..1e6d220199e22 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1928,7 +1928,7 @@ def test_pivot_table_values_key_error():
     df = DataFrame(
         {
             "eventDate": date_range(datetime.today(), periods=20, freq="M").tolist(),
-            "thename": range(0, 20),
+            "thename": range(20),
         }
     )
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index c9fe011f7063b..55f96bd1443de 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -842,7 +842,7 @@ def test_grouper_period_index(self):
         result = period_series.groupby(period_series.index.month).sum()

         expected = Series(
-            range(0, periods), index=Index(range(1, periods + 1), name=index.name)
+            range(periods), index=Index(range(1, periods + 1), name=index.name)
         )
         tm.assert_series_equal(result, expected)

diff --git a/pandas/tests/indexes/multi/test_partial_indexing.py b/pandas/tests/indexes/multi/test_partial_indexing.py
index 47efc43d5eae0..66163dad3deae 100644
--- a/pandas/tests/indexes/multi/test_partial_indexing.py
+++ b/pandas/tests/indexes/multi/test_partial_indexing.py
@@ -31,7 +31,7 @@ def df():
     dr = date_range("2016-01-01", "2016-01-03", freq="12H")
     abc = ["a", "b", "c"]
     mi = MultiIndex.from_product([dr, abc])
-    frame = DataFrame({"c1": range(0, 15)}, index=mi)
+    frame = DataFrame({"c1": range(15)}, index=mi)
     return frame

diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py
index 9d11827e2923e..b86e233110e88 100644
--- a/pandas/tests/indexing/multiindex/test_getitem.py
+++ b/pandas/tests/indexing/multiindex/test_getitem.py
@@ -148,7 +148,7 @@ def test_frame_getitem_simple_key_error(
 def test_tuple_string_column_names():
     # GH#50372
     mi = MultiIndex.from_tuples([("a", "aa"), ("a", "ab"), ("b", "ba"), ("b", "bb")])
-    df = DataFrame([range(0, 4), range(1, 5), range(2, 6)], columns=mi)
+    df = DataFrame([range(4), range(1, 5), range(2, 6)], columns=mi)
     df["single_index"] = 0

     df_flat = df.copy()

diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index ca3ce6ba34515..b3c2e67f7c318 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -2044,7 +2044,7 @@ def test_read_json_dtype_backend(self, string_storage, dtype_backend, orient):
         )

         if orient == "values":
-            expected.columns = list(range(0, 8))
+            expected.columns = list(range(8))

         tm.assert_frame_equal(result, expected)

diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 5bb7097770820..d5f8c5200c4a3 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -1033,7 +1033,7 @@ def test_decode_floating_point(self, sign, float_number):
     def test_encode_big_set(self):
         s = set()

-        for x in range(0, 100000):
+        for x in range(100000):
             s.add(x)

         # Make sure no Exception is raised.
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index db3909c147ad3..55445e44b9366 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1012,7 +1012,7 @@ def test_timezone_aware_index(self, request, pa, timezone_aware_date_list):
     def test_filter_row_groups(self, pa):
         # https://github.com/pandas-dev/pandas/issues/26551
         pytest.importorskip("pyarrow")
-        df = pd.DataFrame({"a": list(range(0, 3))})
+        df = pd.DataFrame({"a": list(range(3))})
         with tm.ensure_clean() as path:
             df.to_parquet(path, engine=pa)
             result = read_parquet(
@@ -1219,7 +1219,7 @@ def test_categorical(self, fp):
         check_round_trip(df, fp)

     def test_filter_row_groups(self, fp):
-        d = {"a": list(range(0, 3))}
+        d = {"a": list(range(3))}
         df = pd.DataFrame(d)
         with tm.ensure_clean() as path:
             df.to_parquet(path, engine=fp, compression=None, row_group_offsets=1)

diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 7459aa1df8f3e..cd504616b6c5d 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -798,7 +798,7 @@ def test_missing_value_generator(self):
         expected_values.insert(0, ".")
         for t in types:
             offset = valid_range[t][1]
-            for i in range(0, 27):
+            for i in range(27):
                 val = StataMissingValue(offset + 1 + i)
                 assert val.string == expected_values[i]

diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index b2a6ac49fdff2..81b466b059702 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -700,7 +700,7 @@ def test_cut_with_duplicated_index_lowest_included():

 def test_cut_with_nonexact_categorical_indices():
     # GH 42424
-    ser = Series(range(0, 100))
+    ser = Series(range(100))
     ser1 = cut(ser, 10).value_counts().head(5)
     ser2 = cut(ser, 10).value_counts().tail(5)
     result = DataFrame({"1": ser1, "2": ser2})

diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 46da18445e135..c43fd05fd5501 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -33,7 +33,7 @@ def dropna(request):
     return request.param


-@pytest.fixture(params=[([0] * 4, [1] * 4), (range(0, 3), range(1, 4))])
+@pytest.fixture(params=[([0] * 4, [1] * 4), (range(3), range(1, 4))])
 def interval_values(request, closed):
     left, right = request.param
     return Categorical(pd.IntervalIndex.from_arrays(left, right, closed))
@@ -215,7 +215,7 @@ def test_pivot_table_dropna_categoricals(self, dropna):
             {
                 "A": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
                 "B": [1, 2, 3, 1, 2, 3, 1, 2, 3],
-                "C": range(0, 9),
+                "C": range(9),
             }
         )

diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index bce7d2d554004..016208f2d2026 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -159,9 +159,9 @@ def test_reindex_inference():

 def test_reindex_downcasting():
     # GH4618 shifted series downcasting
-    s = Series(False, index=range(0, 5))
+    s = Series(False, index=range(5))
     result = s.shift(1).bfill()
-    expected = Series(False, index=range(0, 5))
+    expected = Series(False, index=range(5))
     tm.assert_series_equal(result, expected)

diff --git a/pandas/tests/window/test_rolling_functions.py b/pandas/tests/window/test_rolling_functions.py
index 940f0845befa2..51f801ab3761b 100644
--- a/pandas/tests/window/test_rolling_functions.py
+++ b/pandas/tests/window/test_rolling_functions.py
@@ -388,7 +388,7 @@ def test_rolling_max_resample(step):
     # So that we can have 3 datapoints on last day (4, 10, and 20)
     indices.append(datetime(1975, 1, 5, 1))
     indices.append(datetime(1975, 1, 5, 2))
-    series = Series(list(range(0, 5)) + [10, 20], index=indices)
+    series = Series(list(range(5)) + [10, 20], index=indices)
     # Use floats instead of ints as values
     series = series.map(lambda x: float(x))
     # Sort chronologically
@@ -425,7 +425,7 @@ def test_rolling_min_resample(step):
     # So that we can have 3 datapoints on last day (4, 10, and 20)
     indices.append(datetime(1975, 1, 5, 1))
     indices.append(datetime(1975, 1, 5, 2))
-    series = Series(list(range(0, 5)) + [10, 20], index=indices)
+    series = Series(list(range(5)) + [10, 20], index=indices)
     # Use floats instead of ints as values
     series = series.map(lambda x: float(x))
     # Sort chronologically
@@ -445,7 +445,7 @@ def test_rolling_median_resample():
     # So that we can have 3 datapoints on last day (4, 10, and 20)
     indices.append(datetime(1975, 1, 5, 1))
     indices.append(datetime(1975, 1, 5, 2))
-    series = Series(list(range(0, 5)) + [10, 20], index=indices)
+    series = Series(list(range(5)) + [10, 20], index=indices)
     # Use floats instead of ints as values
     series = series.map(lambda x: float(x))
     # Sort chronologically
From 4683e920434cbc2ee9e797e106d616c74f72afd0 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Tue, 5 Sep 2023 20:07:21 +0200
Subject: [PATCH 14/93] BUG: ArrowDtype raising for fixed size list (#55000)

* BUG: ArrowDtype raising for fixed size list

* Update v2.1.1.rst

* Update test_arrow.py
---
 doc/source/whatsnew/v2.1.1.rst       | 1 +
 pandas/core/dtypes/dtypes.py         | 2 ++
 pandas/tests/extension/test_arrow.py | 9 +++++++++
 3 files changed, 12 insertions(+)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 11b19b1508a71..64d7481117e8e 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -29,6 +29,7 @@ Fixed regressions

 Bug fixes
 ~~~~~~~~~
+- Fixed bug for :class:`ArrowDtype` raising ``NotImplementedError`` for fixed-size list (:issue:`55000`)
 - Fixed bug in :meth:`DataFrame.stack` with ``future_stack=True`` and columns a non-:class:`MultiIndex` consisting of tuples (:issue:`54948`)

 .. ---------------------------------------------------------------------------
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 0589dc5b717a4..12de63967c78f 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -2148,6 +2148,8 @@ def type(self):
             return CategoricalDtypeType
         elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
             return list
+        elif pa.types.is_fixed_size_list(pa_type):
+            return list
         elif pa.types.is_map(pa_type):
             return list
         elif pa.types.is_struct(pa_type):

diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 5f1b16a44b8e9..fa6e85ba204d2 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -2992,6 +2992,15 @@ def test_groupby_count_return_arrow_dtype(data_missing):
     tm.assert_frame_equal(result, expected)


+def test_fixed_size_list():
+    # GH#55000
+    ser = pd.Series(
+        [[1, 2], [3, 4]], dtype=ArrowDtype(pa.list_(pa.int64(), list_size=2))
+    )
+    result = ser.dtype.type
+    assert result == list
+
+
 def test_arrowextensiondtype_dataframe_repr():
     # GH 54062
     df = pd.DataFrame(

From dac46b4652cd94bf6b873af372df3f1544796d5e Mon Sep 17 00:00:00 2001
From: mhb143 <139927657+mhb143@users.noreply.github.com>
Date: Tue, 5 Sep 2023 12:09:45 -0600
Subject: [PATCH 15/93] DOC: Grammatically updated the tech docs (#54989)

Grammatically updated the tech docs

Co-authored-by: Molly Bowers
---
 .../getting_started/intro_tutorials/01_table_oriented.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
index 2dcc8b0abe3b8..caaff3557ae40 100644
--- a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
+++ b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
@@ -106,9 +106,9 @@ between square brackets ``[]``.

 .. note::

-    If you are familiar to Python
+    If you are familiar with Python
     :ref:`dictionaries `, the selection of a
-    single column is very similar to selection of dictionary values based on
+    single column is very similar to the selection of dictionary values based on
     the key. You can create a ``Series`` from scratch as well:

From 5b02305db6647d030ec66788a8ca9f37fe9a2790 Mon Sep 17 00:00:00 2001
From: Paul Uhlenbruck <48606747+pauluhlenbruck@users.noreply.github.com>
Date: Tue, 5 Sep 2023 20:12:12 +0200
Subject: [PATCH 16/93] DOC: expanded pandas.DataFrame.to_sql docstring
 (#54988)

expanded pandas.DataFrame.to_sql docstring

Co-authored-by: vboxuser
---
 pandas/core/generic.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e9b0c23b18373..06284b05ba1b1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2847,7 +2847,7 @@ def to_sql(

         index : bool, default True
             Write DataFrame index as a column. Uses `index_label` as the column
-            name in the table.
+            name in the table. Creates a table index for this column.
         index_label : str or sequence, default None
             Column label for index column(s). If None is given (default) and
             `index` is True, then the index names are used.
From 00bf889426ac40426cec0b4d71eb361a24ad1a8f Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Tue, 5 Sep 2023 20:29:50 +0200
Subject: [PATCH 17/93] ENH: Use more arrow compute functions for
 string[pyarrow] dtype (#54957)

---
 pandas/core/arrays/string_arrow.py | 50 +++++++++++++++---------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index aaa515ac459bd..60d7ae1b998f5 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -417,7 +417,7 @@ def _str_isupper(self):

     def _str_len(self):
         result = pc.utf8_length(self._pa_array)
-        return Int64Dtype().__from_arrow__(result)
+        return self._convert_int_dtype(result)

     def _str_lower(self):
         return type(self)(pc.utf8_lower(self._pa_array))
@@ -446,6 +446,29 @@ def _str_rstrip(self, to_strip=None):
         result = pc.utf8_rtrim(self._pa_array, characters=to_strip)
         return type(self)(result)

+    def _str_count(self, pat: str, flags: int = 0):
+        if flags:
+            return super()._str_count(pat, flags)
+        result = pc.count_substring_regex(self._pa_array, pat)
+        return self._convert_int_dtype(result)
+
+    def _str_find(self, sub: str, start: int = 0, end: int | None = None):
+        if start != 0 and end is not None:
+            slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
+            result = pc.find_substring(slices, sub)
+            not_found = pc.equal(result, -1)
+            offset_result = pc.add(result, end - start)
+            result = pc.if_else(not_found, result, offset_result)
+        elif start == 0 and end is None:
+            slices = self._pa_array
+            result = pc.find_substring(slices, sub)
+        else:
+            return super()._str_find(sub, start, end)
+        return self._convert_int_dtype(result)
+
+    def _convert_int_dtype(self, result):
+        return Int64Dtype().__from_arrow__(result)
+

 class ArrowStringArrayNumpySemantics(ArrowStringArray):
     _storage = "pyarrow_numpy"
@@ -526,34 +549,11 @@ def _str_map(
             return lib.map_infer_mask(arr, f, mask.view("uint8"))

     def _convert_int_dtype(self, result):
+        result = result.to_numpy()
         if result.dtype == np.int32:
             result = result.astype(np.int64)
         return result

-    def _str_count(self, pat: str, flags: int = 0):
-        if flags:
-            return super()._str_count(pat, flags)
-        result = pc.count_substring_regex(self._pa_array, pat).to_numpy()
-        return self._convert_int_dtype(result)
-
-    def _str_len(self):
-        result = pc.utf8_length(self._pa_array).to_numpy()
-        return self._convert_int_dtype(result)
-
-    def _str_find(self, sub: str, start: int = 0, end: int | None = None):
-        if start != 0 and end is not None:
-            slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
-            result = pc.find_substring(slices, sub)
-            not_found = pc.equal(result, -1)
-            offset_result = pc.add(result, end - start)
-            result = pc.if_else(not_found, result, offset_result)
-        elif start == 0 and end is None:
-            slices = self._pa_array
-            result = pc.find_substring(slices, sub)
-        else:
-            return super()._str_find(sub, start, end)
-        return self._convert_int_dtype(result.to_numpy())
-
     def _cmp_method(self, other, op):
         result = super()._cmp_method(other, op)
         return result.to_numpy(np.bool_, na_value=False)
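The visible effect of routing ``count`` and ``find`` (and keeping ``len``) through Arrow compute is performance: the string accessors stay vectorized in pyarrow instead of falling back to a Python-level map, with unchanged results. A sketch, assuming pyarrow is installed:

    import pandas as pd

    ser = pd.Series(["apple", "banana", None], dtype="string[pyarrow]")
    print(ser.str.count("an"))  # 0, 2, <NA>   (pc.count_substring_regex)
    print(ser.str.find("an"))   # -1, 1, <NA>  (pc.find_substring)
    print(ser.str.len())        # 5, 6, <NA>   (pc.utf8_length)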
From e1ec244688b80b52456264285033659bed01ffcd Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Tue, 5 Sep 2023 20:43:25 +0200
Subject: [PATCH 18/93] REGR: interpolate raising if fill_value is given
 (#54927)

* REGR: interpolate raising if fill_value is given

* Update test and message

* Update pandas/core/generic.py

---------

Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
---
 doc/source/whatsnew/v2.1.1.rst                  | 1 +
 pandas/core/generic.py                          | 5 +++--
 pandas/tests/series/methods/test_interpolate.py | 8 ++++++++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 64d7481117e8e..6f431ca7eea5f 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -21,6 +21,7 @@ Fixed regressions
 - Fixed regression in :meth:`DataFrame.to_sql` not roundtripping datetime columns correctly for sqlite (:issue:`54877`)
 - Fixed regression in :meth:`MultiIndex.append` raising when appending overlapping :class:`IntervalIndex` levels (:issue:`54934`)
 - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)
+- Fixed regression in :meth:`Series.interpolate` raising when ``fill_value`` was given (:issue:`54920`)
 - Fixed regression in :meth:`Series.value_counts` raising for numeric data if ``bins`` was specified (:issue:`54857`)
 - Fixed regression when comparing a :class:`Series` with ``datetime64`` dtype with ``None`` (:issue:`54870`)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 06284b05ba1b1..e6bf55a1cbadf 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8225,10 +8225,11 @@ def interpolate(
                 stacklevel=find_stack_level(),
             )

-        if "fill_value" in kwargs:
+        if method in fillna_methods and "fill_value" in kwargs:
             raise ValueError(
                 "'fill_value' is not a valid keyword for "
-                f"{type(self).__name__}.interpolate"
+                f"{type(self).__name__}.interpolate with method from "
+                f"{fillna_methods}"
             )

         if isinstance(obj.index, MultiIndex) and method != "linear":

diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 619690f400d98..549f429f09d35 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -858,3 +858,11 @@ def test_interpolate_asfreq_raises(self):
         with pytest.raises(ValueError, match=msg):
             with tm.assert_produces_warning(FutureWarning, match=msg2):
                 ser.interpolate(method="asfreq")
+
+    def test_interpolate_fill_value(self):
+        # GH#54920
+        pytest.importorskip("scipy")
+        ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])
+        result = ser.interpolate(method="nearest", fill_value=0)
+        expected = Series([np.nan, 0, 1, 1, 3, 0])
+        tm.assert_series_equal(result, expected)
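A quick check of the restored behavior, mirroring the new test (needs scipy, since ``fill_value`` is forwarded to the scipy-based interpolators):

    import numpy as np
    import pandas as pd

    ser = pd.Series([np.nan, 0, 1, np.nan, 3, np.nan])
    # 2.1.0 rejected fill_value for every method; it is accepted again for
    # methods outside the fillna-style set.
    print(ser.interpolate(method="nearest", fill_value=0).tolist())
    # [nan, 0.0, 1.0, 1.0, 3.0, 0.0]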
:class:`AbstractHolidayCalendar` where timezone data was not propagated when computing holiday observances (:issue:`54580`) +- Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`) Categorical ^^^^^^^^^^^ diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx index 02934346130a5..7b306c5e681e0 100644 --- a/pandas/_libs/window/indexers.pyx +++ b/pandas/_libs/window/indexers.pyx @@ -138,6 +138,8 @@ def calculate_variable_window_bounds( break # end bound is previous end # or current index + elif index[end[i - 1]] == end_bound and not right_closed: + end[i] = end[i - 1] + 1 elif (index[end[i - 1]] - end_bound) * index_growth_sign <= 0: end[i] = i + 1 else: diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py index 694a420ad2494..c13ec51ff3851 100644 --- a/pandas/core/indexers/objects.py +++ b/pandas/core/indexers/objects.py @@ -262,7 +262,9 @@ def get_window_bounds( # end bound is previous end # or current index end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign - if end_diff <= zero: + if end_diff == zero and not right_closed: + end[i] = end[i - 1] + 1 + elif end_diff <= zero: end[i] = i + 1 else: end[i] = end[i - 1] diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index ab00e18fc4812..46ab00c3e2284 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -466,20 +466,23 @@ def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = DataFrame( { - "column1": range(6), - "column2": range(6), - "group": 3 * ["A", "B"], - "date": [Timestamp("2019-01-01")] * 6, + "column1": range(8), + "column2": range(8), + "group": ["A"] * 4 + ["B"] * 4, + "date": [ + Timestamp(date) + for date in ["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"] + ] + * 2, } ) result = ( df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum() ) expected = Series( - [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], - index=MultiIndex.from_tuples( - [("A", Timestamp("2019-01-01"))] * 3 - + [("B", Timestamp("2019-01-01"))] * 3, + [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0], + index=MultiIndex.from_frame( + df[["group", "date"]], names=["group", "date"], ), name="column1", @@ -490,10 +493,14 @@ def test_groupby_subset_rolling_subset_with_closed(self): # GH 35549 df = DataFrame( { - "column1": range(6), - "column2": range(6), - "group": 3 * ["A", "B"], - "date": [Timestamp("2019-01-01")] * 6, + "column1": range(8), + "column2": range(8), + "group": ["A"] * 4 + ["B"] * 4, + "date": [ + Timestamp(date) + for date in ["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"] + ] + * 2, } ) @@ -503,10 +510,9 @@ def test_groupby_subset_rolling_subset_with_closed(self): .sum() ) expected = Series( - [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], - index=MultiIndex.from_tuples( - [("A", Timestamp("2019-01-01"))] * 3 - + [("B", Timestamp("2019-01-01"))] * 3, + [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0], + index=MultiIndex.from_frame( + df[["group", "date"]], names=["group", "date"], ), name="column1", diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index f4d903dc19fb7..a02f132e540ac 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -304,6 +304,76 @@ def test_datetimelike_nonunique_index_centering( tm.assert_equal(result, expected) 
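A minimal sketch of the corrected semantics, condensed from the expected values in the new tests below; rows that share a timestamp now receive identical windows:

    import pandas as pd

    idx = pd.DatetimeIndex(["2011-01-01", "2011-01-01", "2011-01-02"])
    ser = pd.Series([0.0, 1.0, 2.0], index=idx)
    # closed="left" excludes each row's own timestamp, so both duplicated
    # "2011-01-01" rows see an empty window (NaN) and the "2011-01-02" row
    # sums both of them: [NaN, NaN, 1.0]
    ser.rolling("2D", closed="left").sum()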
+@pytest.mark.parametrize( + "closed,expected", + [ + ("left", [np.nan, np.nan, 1, 1, 1, 10, 14, 14, 18, 21]), + ("neither", [np.nan, np.nan, 1, 1, 1, 9, 5, 5, 13, 8]), + ("right", [0, 1, 3, 6, 10, 14, 11, 18, 21, 17]), + ("both", [0, 1, 3, 6, 10, 15, 20, 27, 26, 30]), + ], +) +def test_variable_window_nonunique(closed, expected, frame_or_series): + # GH 20712 + index = DatetimeIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-02", + "2011-01-02", + "2011-01-03", + "2011-01-04", + "2011-01-04", + "2011-01-05", + "2011-01-06", + ] + ) + + df = frame_or_series(range(10), index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + + result = df.rolling("2D", closed=closed).sum() + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "closed,expected", + [ + ("left", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 18, 21]), + ("neither", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 13, 8]), + ("right", [0, 1, 3, 6, 10, 15, 21, 28, 21, 17]), + ("both", [0, 1, 3, 6, 10, 15, 21, 28, 26, 30]), + ], +) +def test_variable_offset_window_nonunique(closed, expected, frame_or_series): + # GH 20712 + index = DatetimeIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-02", + "2011-01-02", + "2011-01-03", + "2011-01-04", + "2011-01-04", + "2011-01-05", + "2011-01-06", + ] + ) + + df = frame_or_series(range(10), index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + + offset = BusinessDay(2) + indexer = VariableOffsetWindowIndexer(index=index, offset=offset) + result = df.rolling(indexer, closed=closed, min_periods=1).sum() + + tm.assert_equal(result, expected) + + def test_even_number_window_alignment(): # see discussion in GH 38780 s = Series(range(3), index=date_range(start="2020-01-01", freq="D", periods=3)) From a317995665c5185a7a31316555d34fe5653941fc Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Tue, 5 Sep 2023 20:45:42 +0200 Subject: [PATCH 20/93] REGR: concat raising for 2 different ea dtypes (#54914) * REGR: concat raising for 2 different ea dtypes * Update --- doc/source/whatsnew/v2.1.1.rst | 1 + pandas/core/internals/concat.py | 2 +- pandas/tests/reshape/concat/test_concat.py | 9 +++++++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst index 6f431ca7eea5f..258f05d4277bd 100644 --- a/doc/source/whatsnew/v2.1.1.rst +++ b/doc/source/whatsnew/v2.1.1.rst @@ -13,6 +13,7 @@ including other versions of pandas. 
Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :func:`concat` when :class:`DataFrame` 's have two different extension dtypes (:issue:`54848`) - Fixed regression in :func:`merge` when merging over a PyArrow string index (:issue:`54894`) - Fixed regression in :func:`read_csv` when ``usecols`` is given and ``dtypes`` is a dict for ``engine="python"`` (:issue:`54868`) - Fixed regression in :func:`read_csv` when ``delim_whitespace`` is True (:issue:`54918`, :issue:`54931`) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 4d33f0137d3c4..b2d463a8c6c26 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -177,7 +177,7 @@ def concatenate_managers( values = np.concatenate(vals, axis=1) # type: ignore[arg-type] elif is_1d_only_ea_dtype(blk.dtype): # TODO(EA2D): special-casing not needed with 2D EAs - values = concat_compat(vals, axis=1, ea_compat_axis=True) + values = concat_compat(vals, axis=0, ea_compat_axis=True) values = ensure_block_shape(values, ndim=2) else: values = concat_compat(vals, axis=1) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 3efcd930af581..5dde863f246d1 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -858,3 +858,12 @@ def test_concat_multiindex_with_category(): ) expected = expected.set_index(["c1", "c2"]) tm.assert_frame_equal(result, expected) + + +def test_concat_ea_upcast(): + # GH#54848 + df1 = DataFrame(["a"], dtype="string") + df2 = DataFrame([1], dtype="Int64") + result = concat([df1, df2]) + expected = DataFrame(["a", 1], index=[0, 0]) + tm.assert_frame_equal(result, expected) From e6814133c3df0948d5fb10a01074cea569a852fe Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Tue, 5 Sep 2023 10:07:09 -1000 Subject: [PATCH 21/93] CI: Ignore hypothesis differing executors (#55013) --- pandas/conftest.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pandas/conftest.py b/pandas/conftest.py index a4f58e99d8bcc..ac0275bf695d4 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -71,6 +71,7 @@ Index, MultiIndex, ) +from pandas.util.version import Version if TYPE_CHECKING: from collections.abc import ( @@ -191,6 +192,10 @@ def pytest_collection_modifyitems(items, config) -> None: item.add_marker(pytest.mark.arraymanager) +hypothesis_health_checks = [hypothesis.HealthCheck.too_slow] +if Version(hypothesis.__version__) >= Version("6.83.2"): + hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors) + # Hypothesis hypothesis.settings.register_profile( "ci", @@ -202,7 +207,7 @@ def pytest_collection_modifyitems(items, config) -> None: # 2022-02-09: Changed deadline from 500 -> None. 
Deadline leads to
    # non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969)
    deadline=None,
-    suppress_health_check=(hypothesis.HealthCheck.too_slow,),
+    suppress_health_check=tuple(hypothesis_health_checks),
 )
 hypothesis.settings.load_profile("ci")

From 1b3ebe4656fca6607738851e46eaa53a9f970293 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Wed, 6 Sep 2023 00:32:36 +0200
Subject: [PATCH 22/93] Include pyarrow_numpy string in efficient merge
 implementation (#54974)

Include pyarrow_numpy string in efficient merge implementation
---
 pandas/core/reshape/merge.py             | 3 ++-
 pandas/tests/reshape/merge/test_merge.py | 8 ++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8ef3943ab0d8d..5b07a0010acdd 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2421,7 +2421,8 @@ def _factorize_keys(

     elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype:
         if (isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype)) or (
-            isinstance(lk.dtype, StringDtype) and lk.dtype.storage == "pyarrow"
+            isinstance(lk.dtype, StringDtype)
+            and lk.dtype.storage in ["pyarrow", "pyarrow_numpy"]
         ):
             import pyarrow as pa
             import pyarrow.compute as pc
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 9cada6964c094..4659c16909ed7 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2949,13 +2949,13 @@ def test_merge_ea_int_and_float_numpy():
     tm.assert_frame_equal(result, expected.astype("float64"))


-def test_merge_arrow_string_index():
+def test_merge_arrow_string_index(any_string_dtype):
     # GH#54894
     pytest.importorskip("pyarrow")
-    left = DataFrame({"a": ["a", "b"]}, dtype="string[pyarrow]")
-    right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype="string[pyarrow]"))
+    left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype)
+    right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype))
     result = left.merge(right, left_on="a", right_index=True, how="left")
     expected = DataFrame(
-        {"a": Series(["a", "b"], dtype="string[pyarrow]"), "b": [1, np.nan]}
+        {"a": Series(["a", "b"], dtype=any_string_dtype), "b": [1, np.nan]}
     )
     tm.assert_frame_equal(result, expected)

From da849a95646717013c93838cc798dcc31aee2290 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Wed, 6 Sep 2023 00:41:51 +0200
Subject: [PATCH 23/93] REG: filter not respecting the order of labels (#54982)

---
 doc/source/whatsnew/v2.1.1.rst            |  1 +
 pandas/core/generic.py                    |  8 +++++---
 pandas/tests/frame/methods/test_filter.py | 14 ++++++++++++++
 3 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 258f05d4277bd..b9bdb36fe0ed3 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -19,6 +19,7 @@ Fixed regressions
 - Fixed regression in :func:`read_csv` when ``delim_whitespace`` is True (:issue:`54918`, :issue:`54931`)
 - Fixed regression in :meth:`.GroupBy.get_group` raising for ``axis=1`` (:issue:`54858`)
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
+- Fixed regression in :meth:`DataFrame.filter` not respecting the order of elements for ``filter`` (:issue:`54980`)
 - Fixed regression in :meth:`DataFrame.to_sql` not
roundtripping datetime columns correctly for sqlite (:issue:`54877`) - Fixed regression in :meth:`MultiIndex.append` raising when appending overlapping :class:`IntervalIndex` levels (:issue:`54934`) - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e6bf55a1cbadf..8c1406fc305e3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5718,10 +5718,12 @@ def filter( if items is not None: name = self._get_axis_name(axis) + items = Index(items).intersection(labels) + if len(items) == 0: + # Keep the dtype of labels when we are empty + items = items.astype(labels.dtype) # error: Keywords must be strings - return self.reindex( # type: ignore[misc] - **{name: labels.intersection(items)} - ) + return self.reindex(**{name: items}) # type: ignore[misc] elif like: def f(x) -> bool_t: diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py index 1a2fbf8a65a55..9d5e6876bb08c 100644 --- a/pandas/tests/frame/methods/test_filter.py +++ b/pandas/tests/frame/methods/test_filter.py @@ -137,3 +137,17 @@ def test_filter_regex_non_string(self): result = df.filter(regex="STRING") expected = df[["STRING"]] tm.assert_frame_equal(result, expected) + + def test_filter_keep_order(self): + # GH#54980 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + result = df.filter(items=["B", "A"]) + expected = df[["B", "A"]] + tm.assert_frame_equal(result, expected) + + def test_filter_different_dtype(self): + # GH#54980 + df = DataFrame({1: [1, 2, 3], 2: [4, 5, 6]}) + result = df.filter(items=["B", "A"]) + expected = df[[]] + tm.assert_frame_equal(result, expected) From 876d7858721b4f364f431d38f849594aa0eadc7e Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Wed, 6 Sep 2023 11:00:58 +0200 Subject: [PATCH 24/93] Enable Arrow implementation for removeprefix (#54972) --- pandas/core/arrays/arrow/array.py | 10 +++++----- pandas/core/arrays/string_arrow.py | 19 ++++++++++++++++++- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 4d887ecd1510f..83ed54c42a23c 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -2192,11 +2192,11 @@ def _str_rstrip(self, to_strip=None): return type(self)(result) def _str_removeprefix(self, prefix: str): - # TODO: Should work once https://github.com/apache/arrow/issues/14991 is fixed - # starts_with = pc.starts_with(self._pa_array, pattern=prefix) - # removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) - # result = pc.if_else(starts_with, removed, self._pa_array) - # return type(self)(result) + if not pa_version_under13p0: + starts_with = pc.starts_with(self._pa_array, pattern=prefix) + removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) + result = pc.if_else(starts_with, removed, self._pa_array) + return type(self)(result) predicate = lambda val: val.removeprefix(prefix) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 60d7ae1b998f5..338724d405ad8 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -15,7 +15,10 @@ lib, missing as libmissing, ) -from pandas.compat import pa_version_under7p0 +from pandas.compat import ( + pa_version_under7p0, + pa_version_under13p0, +) from 
pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( @@ -446,6 +449,20 @@ def _str_rstrip(self, to_strip=None): result = pc.utf8_rtrim(self._pa_array, characters=to_strip) return type(self)(result) + def _str_removeprefix(self, prefix: str): + if not pa_version_under13p0: + starts_with = pc.starts_with(self._pa_array, pattern=prefix) + removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) + result = pc.if_else(starts_with, removed, self._pa_array) + return type(self)(result) + return super()._str_removeprefix(prefix) + + def _str_removesuffix(self, suffix: str): + ends_with = pc.ends_with(self._pa_array, pattern=suffix) + removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix)) + result = pc.if_else(ends_with, removed, self._pa_array) + return type(self)(result) + def _str_count(self, pat: str, flags: int = 0): if flags: return super()._str_count(pat, flags) From a7005e0343cdfa682593400ed96fc72ddad629a1 Mon Sep 17 00:00:00 2001 From: Luke Manley Date: Wed, 6 Sep 2023 13:12:29 -0400 Subject: [PATCH 25/93] BUG: merge with left and/or right empty returning mis-ordered columns (#55028) --- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/core/reshape/merge.py | 7 +--- pandas/tests/reshape/merge/test_merge.py | 47 +++++++++++++++++------- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index bd15d5fa085e9..4f38d420a53b4 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -246,7 +246,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ -- +- Bug in :func:`merge` returning columns in incorrect order when left and/or right is empty (:issue:`51929`) - Sparse diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 5b07a0010acdd..6d1ff07e07c76 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1272,12 +1272,7 @@ def _get_merge_keys( # work-around for merge_asof(right_index=True) right_keys.append(right.index._values) if lk is not None and lk == rk: # FIXME: what about other NAs? 
- # avoid key upcast in corner case (length-0) - lk = cast(Hashable, lk) - if len(left) > 0: - right_drop.append(rk) - else: - left_drop.append(lk) + right_drop.append(rk) else: rk = cast(ArrayLike, rk) right_keys.append(rk) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 4659c16909ed7..37ccfddfc82cd 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -582,11 +582,11 @@ def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): df_empty = df[:0] expected = DataFrame( { - "value_x": Series(dtype=df.dtypes["value"]), "key": Series(dtype=df.dtypes["key"]), + "value_x": Series(dtype=df.dtypes["value"]), "value_y": Series(dtype=df.dtypes["value"]), }, - columns=["value_x", "key", "value_y"], + columns=["key", "value_x", "value_y"], ) actual = df_empty.merge(df, on="key") tm.assert_frame_equal(actual, expected) @@ -889,13 +889,13 @@ def test_merge_on_datetime64tz_empty(self): result = left.merge(right, on="date") expected = DataFrame( { + "date": Series(dtype=dtz), "value_x": Series(dtype=float), "date2_x": Series(dtype=dtz), - "date": Series(dtype=dtz), "value_y": Series(dtype=float), "date2_y": Series(dtype=dtz), }, - columns=["value_x", "date2_x", "date", "value_y", "date2_y"], + columns=["date", "value_x", "date2_x", "value_y", "date2_y"], ) tm.assert_frame_equal(result, expected) @@ -1827,11 +1827,9 @@ def test_merge_empty(self, left_empty, how, exp): if exp == "left": expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]}) elif exp == "right": - expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]}) + expected = DataFrame({"A": [1], "B": [np.nan], "C": [5]}) elif exp == "empty": expected = DataFrame(columns=["A", "B", "C"], dtype="int64") - if left_empty: - expected = expected[["B", "A", "C"]] elif exp == "empty_cross": expected = DataFrame(columns=["A_x", "B", "A_y", "C"], dtype="int64") @@ -2481,14 +2479,12 @@ def test_merge_multiindex_columns(): result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf))) # Constructing the expected results - expected_labels = [letter + l_suf for letter in letters] + [ - letter + r_suf for letter in letters - ] - expected_index = MultiIndex.from_product( - [expected_labels, numbers], names=["outer", "inner"] - ) + tuples = [(letter + l_suf, num) for letter in letters for num in numbers] + tuples += [("id", "")] + tuples += [(letter + r_suf, num) for letter in letters for num in numbers] + + expected_index = MultiIndex.from_tuples(tuples, names=["outer", "inner"]) expected = DataFrame(columns=expected_index) - expected["id"] = "" tm.assert_frame_equal(result, expected) @@ -2959,3 +2955,26 @@ def test_merge_arrow_string_index(any_string_dtype): {"a": Series(["a", "b"], dtype=any_string_dtype), "b": [1, np.nan]} ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("left_empty", [True, False]) +@pytest.mark.parametrize("right_empty", [True, False]) +def test_merge_empty_frames_column_order(left_empty, right_empty): + # GH 51929 + df1 = DataFrame(1, index=[0], columns=["A", "B"]) + df2 = DataFrame(1, index=[0], columns=["A", "C", "D"]) + + if left_empty: + df1 = df1.iloc[:0] + if right_empty: + df2 = df2.iloc[:0] + + result = merge(df1, df2, on=["A"], how="outer") + expected = DataFrame(1, index=[0], columns=["A", "B", "C", "D"]) + if left_empty and right_empty: + expected = expected.iloc[:0] + elif left_empty: + expected.loc[:, "B"] = np.nan + elif right_empty: + expected.loc[:, ["C", "D"]] = 
np.nan + tm.assert_frame_equal(result, expected) From e5f81ac8a2645316a42db6348d2e5dc699f10783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Wed, 6 Sep 2023 13:16:27 -0400 Subject: [PATCH 26/93] TYP: fix a few types (#54976) * TYP: fix a few types * namespace test * read_fwf overloads * Revert "namespace test" This reverts commit 0f72079f229db7e243784ee65c2e968db5f7e2ff. * revert util and move kwds * isort --- pandas/core/frame.py | 24 ++++++++++++------ pandas/core/generic.py | 7 ++++-- pandas/io/excel/_base.py | 31 +++++++++-------------- pandas/io/formats/excel.py | 4 +-- pandas/io/json/_json.py | 5 ++-- pandas/io/parsers/readers.py | 49 ++++++++++++++++++++++++++++++++++++ 6 files changed, 86 insertions(+), 34 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4bfa8a4415785..a731cdbf99b0e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1926,11 +1926,17 @@ def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., + index: bool = ..., ) -> dict: ... @overload - def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: + def to_dict( + self, + orient: Literal["records"], + into: type[dict] = ..., + index: bool = ..., + ) -> list[dict]: ... @deprecate_nonkeyword_arguments( @@ -11297,7 +11303,7 @@ def _reduce_axis1(self, name: str, func, skipna: bool) -> Series: def any( # type: ignore[override] self, *, - axis: Axis = 0, + axis: Axis | None = 0, bool_only: bool = False, skipna: bool = True, **kwargs, @@ -11312,7 +11318,7 @@ def any( # type: ignore[override] @doc(make_doc("all", ndim=2)) def all( self, - axis: Axis = 0, + axis: Axis | None = 0, bool_only: bool = False, skipna: bool = True, **kwargs, @@ -11711,6 +11717,7 @@ def quantile( axis: Axis = ..., numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., ) -> Series: ... @@ -11721,6 +11728,7 @@ def quantile( axis: Axis = ..., numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., ) -> Series | DataFrame: ... @@ -11731,6 +11739,7 @@ def quantile( axis: Axis = ..., numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., + method: Literal["single", "table"] = ..., ) -> Series | DataFrame: ... 
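These overloads only spell out behavior the runtime already has: quantile returns a Series for a scalar q and a DataFrame for a list-like q. A quick sketch:

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3, 4]})
    df.quantile(0.5)    # scalar q -> Series
    df.quantile([0.5])  # list-like q -> DataFrame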
@@ -11830,11 +11839,10 @@ def quantile( if not is_list_like(q): # BlockManager.quantile expects listlike, so we wrap and unwrap here - # error: List item 0 has incompatible type "Union[float, Union[Union[ - # ExtensionArray, ndarray[Any, Any]], Index, Series], Sequence[float]]"; - # expected "float" - res_df = self.quantile( # type: ignore[call-overload] - [q], + # error: List item 0 has incompatible type "float | ExtensionArray | + # ndarray[Any, Any] | Index | Series | Sequence[float]"; expected "float" + res_df = self.quantile( + [q], # type: ignore[list-item] axis=axis, numeric_only=numeric_only, interpolation=interpolation, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8c1406fc305e3..975fbaf59df5c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11827,7 +11827,7 @@ def _logical_func( self, name: str, func, - axis: Axis = 0, + axis: Axis | None = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, @@ -11840,7 +11840,10 @@ def _logical_func( res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) - return res._logical_func(name, func, skipna=skipna, **kwargs) + # error: Item "bool" of "Series | bool" has no attribute "_logical_func" + return res._logical_func( # type: ignore[union-attr] + name, func, skipna=skipna, **kwargs + ) elif axis is None: axis = 0 diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 9ffbfb9f1149f..b4b0f29019c31 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -1,6 +1,5 @@ from __future__ import annotations -import abc from collections.abc import ( Hashable, Iterable, @@ -549,7 +548,7 @@ def read_excel( _WorkbookT = TypeVar("_WorkbookT") -class BaseExcelReader(Generic[_WorkbookT], metaclass=abc.ABCMeta): +class BaseExcelReader(Generic[_WorkbookT]): book: _WorkbookT def __init__( @@ -589,13 +588,11 @@ def __init__( ) @property - @abc.abstractmethod def _workbook_class(self) -> type[_WorkbookT]: - pass + raise NotImplementedError - @abc.abstractmethod def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT: - pass + raise NotImplementedError def close(self) -> None: if hasattr(self, "book"): @@ -611,21 +608,17 @@ def close(self) -> None: self.handles.close() @property - @abc.abstractmethod def sheet_names(self) -> list[str]: - pass + raise NotImplementedError - @abc.abstractmethod def get_sheet_by_name(self, name: str): - pass + raise NotImplementedError - @abc.abstractmethod def get_sheet_by_index(self, index: int): - pass + raise NotImplementedError - @abc.abstractmethod def get_sheet_data(self, sheet, rows: int | None = None): - pass + raise NotImplementedError def raise_if_bad_sheet_by_index(self, index: int) -> None: n_sheets = len(self.sheet_names) @@ -940,7 +933,7 @@ def parse( @doc(storage_options=_shared_docs["storage_options"]) -class ExcelWriter(Generic[_WorkbookT], metaclass=abc.ABCMeta): +class ExcelWriter(Generic[_WorkbookT]): """ Class for writing DataFrame objects into excel sheets. @@ -1178,20 +1171,19 @@ def engine(self) -> str: return self._engine @property - @abc.abstractmethod def sheets(self) -> dict[str, Any]: """Mapping of sheet names to sheet objects.""" + raise NotImplementedError @property - @abc.abstractmethod def book(self) -> _WorkbookT: """ Book instance. Class type will depend on the engine used. This attribute can be used to access engine-specific features. 
""" + raise NotImplementedError - @abc.abstractmethod def _write_cells( self, cells, @@ -1214,12 +1206,13 @@ def _write_cells( freeze_panes: int tuple of length 2 contains the bottom-most row and right-most column to freeze """ + raise NotImplementedError - @abc.abstractmethod def _save(self) -> None: """ Save workbook to disk. """ + raise NotImplementedError def __init__( self, diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 9970d465ced9d..b344d9849f16c 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -941,9 +941,7 @@ def write( if isinstance(writer, ExcelWriter): need_save = False else: - # error: Cannot instantiate abstract class 'ExcelWriter' with abstract - # attributes 'engine', 'save', 'supported_extensions' and 'write_cells' - writer = ExcelWriter( # type: ignore[abstract] + writer = ExcelWriter( writer, engine=engine, storage_options=storage_options, diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 833f4986b6da6..52ea072d1483f 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -82,6 +82,7 @@ JSONEngine, JSONSerializable, ReadBuffer, + Self, StorageOptions, WriteBuffer, ) @@ -1056,7 +1057,7 @@ def close(self) -> None: if self.handles is not None: self.handles.close() - def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]: + def __iter__(self) -> Self: return self @overload @@ -1099,7 +1100,7 @@ def __next__(self) -> DataFrame | Series: else: return obj - def __enter__(self) -> JsonReader[FrameSeriesStrT]: + def __enter__(self) -> Self: return self def __exit__( diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 10d3ab230cb9d..e0f171035e89e 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1307,6 +1307,51 @@ def read_table( return _read(filepath_or_buffer, kwds) +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[True], + chunksize: int | None = ..., + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: bool = ..., + chunksize: int, + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + **kwds, +) -> DataFrame: + ... 
+ + def read_fwf( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, @@ -1314,6 +1359,8 @@ def read_fwf( widths: Sequence[int] | None = None, infer_nrows: int = 100, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + iterator: bool = False, + chunksize: int | None = None, **kwds, ) -> DataFrame | TextFileReader: r""" @@ -1412,6 +1459,8 @@ def read_fwf( kwds["colspecs"] = colspecs kwds["infer_nrows"] = infer_nrows kwds["engine"] = "python-fwf" + kwds["iterator"] = iterator + kwds["chunksize"] = chunksize check_dtype_backend(dtype_backend) kwds["dtype_backend"] = dtype_backend From f87b7e309ff34541158fe06cc0c915c09fa37c37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Lucas=20Mayer?= Date: Wed, 6 Sep 2023 14:17:32 -0300 Subject: [PATCH 27/93] TST: add test case of ngroup with NaN value (#54966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add test case of ngroup with nan value Co-authored-by: José Lucas Silva Mayer Co-authored-by: Willian Wang * fix linter issues Co-authored-by: José Lucas Silva Mayer Co-authored-by: Willian Wang * use Categorical object instead of pd.Categorical Co-authored-by: José Lucas Silva Mayer Co-authored-by: Willian Wang * use native assert function Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> * test full result of ngroup method Signed-off-by: José Lucas Silva Mayer --------- Signed-off-by: José Lucas Silva Mayer Co-authored-by: Willian Wang Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- pandas/tests/groupby/test_groupby.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 1e6d220199e22..999a03d18644d 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3189,6 +3189,14 @@ def test_depr_get_group_len_1_list_likes(test_series, kwarg, value, name, warn): tm.assert_equal(result, expected) +def test_groupby_ngroup_with_nan(): + # GH#50100 + df = DataFrame({"a": Categorical([np.nan]), "b": [1]}) + result = df.groupby(["a", "b"], dropna=False, observed=False).ngroup() + expected = Series([0]) + tm.assert_series_equal(result, expected) + + def test_get_group_axis_1(): # GH#54858 df = DataFrame( From 6cb1da95fcd2df9689f5c957a85c299d67b9aec9 Mon Sep 17 00:00:00 2001 From: Abdullah Ihsan Secer Date: Wed, 6 Sep 2023 18:19:30 +0100 Subject: [PATCH 28/93] TST: Use (unused) window parameter of test_freq_window_not_implemented (#54947) * Use window parameter of test_freq_window_not_implemented * Revert change in exception message --- pandas/tests/window/test_rolling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index a02f132e540ac..3fe922539780d 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -100,9 +100,9 @@ def test_freq_window_not_implemented(window): index=date_range("2015-12-24", periods=10, freq="D"), ) with pytest.raises( - NotImplementedError, match="step is not supported with frequency windows" + NotImplementedError, match="^step (not implemented|is not supported)" ): - df.rolling("3D", step=3) + df.rolling(window, step=3).sum() @pytest.mark.parametrize("agg", ["cov", "corr"]) From 3e1dc77866d3313f85564e7c67f8e6f7339c2cc6 Mon Sep 17 00:00:00 2001 From: David Poznik Date: Wed, 6 Sep 2023 13:06:14 -0700 Subject: [PATCH 29/93] DOC: Add 
missing word to `IndexOpsMixin.array` docstring (#55034) Add missing word to `IndexOpsMixin.array` docstring --- pandas/core/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index d973f8f5fe35a..3026189e747bb 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -485,8 +485,8 @@ def array(self) -> ExtensionArray: types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:`numpy.ndarray`. - ``.array`` differs ``.values`` which may require converting the - data to a different form. + ``.array`` differs from ``.values``, which may require converting + the data to a different form. See Also -------- From 1aa885730ae3e01bb7123059d59220e67012343b Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 6 Sep 2023 10:07:06 -1000 Subject: [PATCH 30/93] TST: Use more explicit object names (#55033) --- pandas/tests/frame/methods/test_reindex.py | 24 +++++--- pandas/tests/indexes/ranges/test_range.py | 55 ++++++++++--------- pandas/tests/indexing/test_categorical.py | 11 ++-- .../indexing/test_chaining_and_caching.py | 6 +- pandas/tests/io/formats/test_info.py | 6 +- pandas/tests/io/formats/test_series_info.py | 6 +- pandas/tests/reshape/merge/test_merge.py | 18 +++--- pandas/tests/reshape/test_cut.py | 12 ++-- pandas/tests/reshape/test_pivot.py | 8 ++- pandas/tests/reshape/test_qcut.py | 10 ++-- pandas/tests/test_algos.py | 4 +- 11 files changed, 92 insertions(+), 68 deletions(-) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 0858e33a989b7..56bdd2fc664cc 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -26,7 +26,7 @@ isna, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT +from pandas.api.types import CategoricalDtype class TestReindexSetIndex: @@ -1082,7 +1082,9 @@ def test_reindex_with_categoricalindex(self): { "A": np.arange(3, dtype="int64"), }, - index=CategoricalIndex(list("abc"), dtype=CDT(list("cabe")), name="B"), + index=CategoricalIndex( + list("abc"), dtype=CategoricalDtype(list("cabe")), name="B" + ), ) # reindexing @@ -1111,13 +1113,13 @@ def test_reindex_with_categoricalindex(self): result = df.reindex(Categorical(["a", "e"], categories=cats)) expected = DataFrame( - {"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats))} + {"A": [0, np.nan], "B": Series(list("ae")).astype(CategoricalDtype(cats))} ).set_index("B") tm.assert_frame_equal(result, expected, check_index_type=True) result = df.reindex(Categorical(["a"], categories=cats)) expected = DataFrame( - {"A": [0], "B": Series(list("a")).astype(CDT(cats))} + {"A": [0], "B": Series(list("a")).astype(CategoricalDtype(cats))} ).set_index("B") tm.assert_frame_equal(result, expected, check_index_type=True) @@ -1138,13 +1140,19 @@ def test_reindex_with_categoricalindex(self): # give back the type of categorical that we received result = df.reindex(Categorical(["a", "e"], categories=cats, ordered=True)) expected = DataFrame( - {"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats, ordered=True))} + { + "A": [0, np.nan], + "B": Series(list("ae")).astype(CategoricalDtype(cats, ordered=True)), + } ).set_index("B") tm.assert_frame_equal(result, expected, check_index_type=True) result = df.reindex(Categorical(["a", "d"], categories=["a", "d"])) expected = DataFrame( - {"A": [0, np.nan], "B": 
Series(list("ad")).astype(CDT(["a", "d"]))} + { + "A": [0, np.nan], + "B": Series(list("ad")).astype(CategoricalDtype(["a", "d"])), + } ).set_index("B") tm.assert_frame_equal(result, expected, check_index_type=True) @@ -1152,7 +1160,9 @@ def test_reindex_with_categoricalindex(self): { "A": np.arange(6, dtype="int64"), }, - index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cabe")), name="B"), + index=CategoricalIndex( + list("aabbca"), dtype=CategoricalDtype(list("cabe")), name="B" + ), ) # passed duplicate indexers are not allowed msg = "cannot reindex on an axis with duplicate labels" diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 5f137df281fa3..132704434829e 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -10,9 +10,6 @@ ) import pandas._testing as tm -# aliases to make some tests easier to read -RI = RangeIndex - class TestRangeIndex: @pytest.fixture @@ -507,25 +504,31 @@ def test_len_specialised(self, step): @pytest.mark.parametrize( "indices, expected", [ - ([RI(1, 12, 5)], RI(1, 12, 5)), - ([RI(0, 6, 4)], RI(0, 6, 4)), - ([RI(1, 3), RI(3, 7)], RI(1, 7)), - ([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)), - ([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)), - ([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)), - ([RI(-4, -8), RI(-8, -12)], RI(0, 0)), - ([RI(-4, -8), RI(3, -4)], RI(0, 0)), - ([RI(-4, -8), RI(3, 5)], RI(3, 5)), - ([RI(-4, -2), RI(3, 5)], Index([-4, -3, 3, 4])), - ([RI(-2), RI(3, 5)], RI(3, 5)), - ([RI(2), RI(2)], Index([0, 1, 0, 1])), - ([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)), - ([RI(2), RI(3, 5), RI(5, 8, 4)], Index([0, 1, 3, 4, 5])), - ([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)), - ([RI(3), Index([-1, 3, 15])], Index([0, 1, 2, -1, 3, 15])), - ([RI(3), Index([-1, 3.1, 15.0])], Index([0, 1, 2, -1, 3.1, 15.0])), - ([RI(3), Index(["a", None, 14])], Index([0, 1, 2, "a", None, 14])), - ([RI(3, 1), Index(["a", None, 14])], Index(["a", None, 14])), + ([RangeIndex(1, 12, 5)], RangeIndex(1, 12, 5)), + ([RangeIndex(0, 6, 4)], RangeIndex(0, 6, 4)), + ([RangeIndex(1, 3), RangeIndex(3, 7)], RangeIndex(1, 7)), + ([RangeIndex(1, 5, 2), RangeIndex(5, 6)], RangeIndex(1, 6, 2)), + ([RangeIndex(1, 3, 2), RangeIndex(4, 7, 3)], RangeIndex(1, 7, 3)), + ([RangeIndex(-4, 3, 2), RangeIndex(4, 7, 2)], RangeIndex(-4, 7, 2)), + ([RangeIndex(-4, -8), RangeIndex(-8, -12)], RangeIndex(0, 0)), + ([RangeIndex(-4, -8), RangeIndex(3, -4)], RangeIndex(0, 0)), + ([RangeIndex(-4, -8), RangeIndex(3, 5)], RangeIndex(3, 5)), + ([RangeIndex(-4, -2), RangeIndex(3, 5)], Index([-4, -3, 3, 4])), + ([RangeIndex(-2), RangeIndex(3, 5)], RangeIndex(3, 5)), + ([RangeIndex(2), RangeIndex(2)], Index([0, 1, 0, 1])), + ([RangeIndex(2), RangeIndex(2, 5), RangeIndex(5, 8, 4)], RangeIndex(0, 6)), + ( + [RangeIndex(2), RangeIndex(3, 5), RangeIndex(5, 8, 4)], + Index([0, 1, 3, 4, 5]), + ), + ( + [RangeIndex(-2, 2), RangeIndex(2, 5), RangeIndex(5, 8, 4)], + RangeIndex(-2, 6), + ), + ([RangeIndex(3), Index([-1, 3, 15])], Index([0, 1, 2, -1, 3, 15])), + ([RangeIndex(3), Index([-1, 3.1, 15.0])], Index([0, 1, 2, -1, 3.1, 15.0])), + ([RangeIndex(3), Index(["a", None, 14])], Index([0, 1, 2, "a", None, 14])), + ([RangeIndex(3, 1), Index(["a", None, 14])], Index(["a", None, 14])), ], ) def test_append(self, indices, expected): @@ -567,7 +570,7 @@ def test_format_empty(self): assert empty_idx.format(name=True) == [""] @pytest.mark.parametrize( - "RI", + "ri", [ RangeIndex(0, -1, -1), RangeIndex(0, 1, 1), @@ -576,10 +579,10 @@ def 
test_format_empty(self): RangeIndex(-3, -5, -2), ], ) - def test_append_len_one(self, RI): + def test_append_len_one(self, ri): # GH39401 - result = RI.append([]) - tm.assert_index_equal(result, RI, exact=True) + result = ri.append([]) + tm.assert_index_equal(result, ri, exact=True) @pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])]) def test_isin_range(self, base): diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index b45d197af332e..d3a6d4bf7cebf 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -16,7 +16,6 @@ Timestamp, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT @pytest.fixture @@ -25,7 +24,9 @@ def df(): { "A": np.arange(6, dtype="int64"), }, - index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cab")), name="B"), + index=CategoricalIndex( + list("aabbca"), dtype=CategoricalDtype(list("cab")), name="B" + ), ) @@ -35,13 +36,15 @@ def df2(): { "A": np.arange(6, dtype="int64"), }, - index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cabe")), name="B"), + index=CategoricalIndex( + list("aabbca"), dtype=CategoricalDtype(list("cabe")), name="B" + ), ) class TestCategoricalIndex: def test_loc_scalar(self, df): - dtype = CDT(list("cab")) + dtype = CategoricalDtype(list("cab")) result = df.loc["a"] bidx = Series(list("aaa"), name="B").astype(dtype) assert bidx.dtype == dtype diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index f36fdf0d36ea9..7353b5ef76ba3 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -1,4 +1,4 @@ -from string import ascii_letters as letters +from string import ascii_letters import numpy as np import pytest @@ -24,9 +24,9 @@ def random_text(nobs=100): # Construct a DataFrame where each row is a random slice from 'letters' - idxs = np.random.default_rng(2).integers(len(letters), size=(nobs, 2)) + idxs = np.random.default_rng(2).integers(len(ascii_letters), size=(nobs, 2)) idxs.sort(axis=1) - strings = [letters[x[0] : x[1]] for x in idxs] + strings = [ascii_letters[x[0] : x[1]] for x in idxs] return DataFrame(strings, columns=["letters"]) diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index 73de2b068b699..6c3bf01cb1857 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -1,6 +1,6 @@ from io import StringIO import re -from string import ascii_uppercase as uppercase +from string import ascii_uppercase import sys import textwrap @@ -452,9 +452,9 @@ def memory_usage(f): return f.memory_usage(deep=True).sum() N = 100 - M = len(uppercase) + M = len(ascii_uppercase) index = MultiIndex.from_product( - [list(uppercase), date_range("20160101", periods=N)], + [list(ascii_uppercase), date_range("20160101", periods=N)], names=["id", "date"], ) df = DataFrame( diff --git a/pandas/tests/io/formats/test_series_info.py b/pandas/tests/io/formats/test_series_info.py index 02827ee25042a..29dd704f6efa9 100644 --- a/pandas/tests/io/formats/test_series_info.py +++ b/pandas/tests/io/formats/test_series_info.py @@ -1,5 +1,5 @@ from io import StringIO -from string import ascii_uppercase as uppercase +from string import ascii_uppercase import textwrap import numpy as np @@ -165,9 +165,9 @@ def test_info_memory_usage_bug_on_multiindex(): # GH 14308 # memory usage introspection should not materialize .values N = 
100 - M = len(uppercase) + M = len(ascii_uppercase) index = MultiIndex.from_product( - [list(uppercase), date_range("20160101", periods=N)], + [list(ascii_uppercase), date_range("20160101", periods=N)], names=["id", "date"], ) s = Series(np.random.default_rng(2).standard_normal(N * M), index=index) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 37ccfddfc82cd..d889ae2e4806b 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -26,7 +26,6 @@ TimedeltaIndex, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import ( MergeError, @@ -1842,7 +1841,7 @@ def left(): { "X": Series( np.random.default_rng(2).choice(["foo", "bar"], size=(10,)) - ).astype(CDT(["foo", "bar"])), + ).astype(CategoricalDtype(["foo", "bar"])), "Y": np.random.default_rng(2).choice(["one", "two", "three"], size=(10,)), } ) @@ -1851,7 +1850,10 @@ def left(): @pytest.fixture def right(): return DataFrame( - {"X": Series(["foo", "bar"]).astype(CDT(["foo", "bar"])), "Z": [1, 2]} + { + "X": Series(["foo", "bar"]).astype(CategoricalDtype(["foo", "bar"])), + "Z": [1, 2], + } ) @@ -2002,8 +2004,8 @@ def test_other_columns(self, left, right): "change", [ lambda x: x, - lambda x: x.astype(CDT(["foo", "bar", "bah"])), - lambda x: x.astype(CDT(ordered=True)), + lambda x: x.astype(CategoricalDtype(["foo", "bar", "bah"])), + lambda x: x.astype(CategoricalDtype(ordered=True)), ], ) def test_dtype_on_merged_different(self, change, join_type, left, right): @@ -2110,11 +2112,13 @@ def test_merging_with_bool_or_int_cateorical_column( # GH 17187 # merging with a boolean/int categorical column df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column}) - df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered)) + df1["cat"] = df1["cat"].astype(CategoricalDtype(categories, ordered=ordered)) df2 = DataFrame({"id": [2, 4], "num": [1, 9]}) result = df1.merge(df2) expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]}) - expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered)) + expected["cat"] = expected["cat"].astype( + CategoricalDtype(categories, ordered=ordered) + ) tm.assert_frame_equal(expected, result) def test_merge_on_int_array(self): diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index 81b466b059702..3a284f7732ac1 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -21,7 +21,7 @@ to_datetime, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT +from pandas.api.types import CategoricalDtype import pandas.core.reshape.tile as tmod @@ -359,7 +359,7 @@ def test_cut_return_intervals(): IntervalIndex.from_breaks(exp_bins, closed="right").take( [0, 0, 0, 1, 1, 1, 2, 2, 2] ) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) tm.assert_series_equal(result, expected) @@ -370,7 +370,7 @@ def test_series_ret_bins(): expected = Series( IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) tm.assert_series_equal(result, expected) @@ -445,7 +445,7 @@ def test_datetime_bin(conv): Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), ] ) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) bins = [conv(v) for v in bin_data] result = Series(cut(data, 
bins=bins)) @@ -491,7 +491,7 @@ def test_datetime_cut(data): ), ] ) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) tm.assert_series_equal(Series(result), expected) @@ -534,7 +534,7 @@ def test_datetime_tz_cut(bins, box): ), ] ) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index c43fd05fd5501..28ad133a0c8d6 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -23,7 +23,7 @@ date_range, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT +from pandas.api.types import CategoricalDtype from pandas.core.reshape import reshape as reshape_lib from pandas.core.reshape.pivot import pivot_table @@ -219,10 +219,12 @@ def test_pivot_table_dropna_categoricals(self, dropna): } ) - df["A"] = df["A"].astype(CDT(categories, ordered=False)) + df["A"] = df["A"].astype(CategoricalDtype(categories, ordered=False)) result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna) expected_columns = Series(["a", "b", "c"], name="A") - expected_columns = expected_columns.astype(CDT(categories, ordered=False)) + expected_columns = expected_columns.astype( + CategoricalDtype(categories, ordered=False) + ) expected_index = Series([1, 2, 3], name="B") expected = DataFrame( [[0.0, 3.0, 6.0], [1.0, 4.0, 7.0], [2.0, 5.0, 8.0]], diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index 907eeca6e9b5e..bcfbe5ed1aa20 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -20,7 +20,7 @@ timedelta_range, ) import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT +from pandas.api.types import CategoricalDtype from pandas.tseries.offsets import ( Day, @@ -129,7 +129,9 @@ def test_qcut_return_intervals(): exp_levels = np.array( [Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)] ) - exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True)) + exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype( + CategoricalDtype(ordered=True) + ) tm.assert_series_equal(res, exp) @@ -199,7 +201,7 @@ def test_single_quantile(data, start, end, length, labels): if labels is None: intervals = IntervalIndex([Interval(start, end)] * length, closed="right") - expected = Series(intervals).astype(CDT(ordered=True)) + expected = Series(intervals).astype(CategoricalDtype(ordered=True)) else: expected = Series([0] * length, dtype=np.intp) @@ -249,7 +251,7 @@ def test_datetime_tz_qcut(bins): ), ] ) - ).astype(CDT(ordered=True)) + ).astype(CategoricalDtype(ordered=True)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index cb703d3439d44..661290fb00d13 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -17,7 +17,7 @@ is_integer_dtype, is_object_dtype, ) -from pandas.core.dtypes.dtypes import CategoricalDtype as CDT +from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( @@ -1182,7 +1182,7 @@ def test_value_counts(self): with tm.assert_produces_warning(FutureWarning, match=msg): result = algos.value_counts(factor) breaks = [-1.606, -1.018, -0.431, 0.155, 0.741] - index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True)) + index = IntervalIndex.from_breaks(breaks).astype(CategoricalDtype(ordered=True)) expected = 
Series([1, 0, 2, 1], index=index, name="count") tm.assert_series_equal(result.sort_index(), expected.sort_index()) From 88683e9dbc4ba2fe7b1185d88b538e2bbb2d3601 Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Thu, 7 Sep 2023 00:40:23 +0200 Subject: [PATCH 31/93] BUG: pct_change showing unnecessary FutureWarning (#54983) * BUG: pct_change showing unnecessary FutureWarning * Fix df case * Fix --- doc/source/whatsnew/v2.1.1.rst | 1 + pandas/core/generic.py | 24 ++++++++++++------- pandas/tests/frame/methods/test_pct_change.py | 18 ++++++++++++++ .../tests/series/methods/test_pct_change.py | 8 +++++++ 4 files changed, 42 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst index b9bdb36fe0ed3..fe511b5cdec67 100644 --- a/doc/source/whatsnew/v2.1.1.rst +++ b/doc/source/whatsnew/v2.1.1.rst @@ -34,6 +34,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug for :class:`ArrowDtype` raising ``NotImplementedError`` for fixed-size list (:issue:`55000`) - Fixed bug in :meth:`DataFrame.stack` with ``future_stack=True`` and columns a non-:class:`MultiIndex` consisting of tuples (:issue:`54948`) +- Fixed bug in :meth:`Series.pct_change` and :meth:`DataFrame.pct_change` showing unnecessary ``FutureWarning`` (:issue:`54981`) .. --------------------------------------------------------------------------- .. _whatsnew_211.other: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 975fbaf59df5c..5c303e2a73bd7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11793,15 +11793,21 @@ def pct_change( stacklevel=find_stack_level(), ) if fill_method is lib.no_default: - if self.isna().values.any(): - warnings.warn( - "The default fill_method='pad' in " - f"{type(self).__name__}.pct_change is deprecated and will be " - "removed in a future version. Call ffill before calling " - "pct_change to retain current behavior and silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) + cols = self.items() if self.ndim == 2 else [(None, self)] + for _, col in cols: + mask = col.isna().values + mask = mask[np.argmax(~mask) :] + if mask.any(): + warnings.warn( + "The default fill_method='pad' in " + f"{type(self).__name__}.pct_change is deprecated and will be " + "removed in a future version. 
Call ffill before calling " + "pct_change to retain current behavior and silence this " + "warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + break fill_method = "pad" if limit is lib.no_default: limit = None diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py index d0153da038a75..ede212ae18ae9 100644 --- a/pandas/tests/frame/methods/test_pct_change.py +++ b/pandas/tests/frame/methods/test_pct_change.py @@ -160,3 +160,21 @@ def test_pct_change_with_duplicated_indices(fill_method): index=["a", "b"] * 3, ) tm.assert_frame_equal(result, expected) + + +def test_pct_change_none_beginning_no_warning(): + # GH#54481 + df = DataFrame( + [ + [1, None], + [2, 1], + [3, 2], + [4, 3], + [5, 4], + ] + ) + result = df.pct_change() + expected = DataFrame( + {0: [np.nan, 1, 0.5, 1 / 3, 0.25], 1: [np.nan, np.nan, 1, 0.5, 1 / 3]} + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_pct_change.py b/pandas/tests/series/methods/test_pct_change.py index 4dabf7b87e2cd..6740b8756853e 100644 --- a/pandas/tests/series/methods/test_pct_change.py +++ b/pandas/tests/series/methods/test_pct_change.py @@ -107,3 +107,11 @@ def test_pct_change_with_duplicated_indices(fill_method): expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3) tm.assert_series_equal(result, expected) + + +def test_pct_change_no_warning_na_beginning(): + # GH#54981 + ser = Series([None, None, 1, 2, 3]) + result = ser.pct_change() + expected = Series([np.nan, np.nan, np.nan, 1, 0.5]) + tm.assert_series_equal(result, expected) From faeedade7966d6f2a5b601c26205a71362913c47 Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Thu, 7 Sep 2023 00:42:37 +0200 Subject: [PATCH 32/93] ENH: Implement more string accessors through PyArrow (#54960) --- pandas/core/arrays/string_arrow.py | 31 ++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 338724d405ad8..a6838fbc73be9 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -50,6 +50,8 @@ if TYPE_CHECKING: + from collections.abc import Sequence + from pandas._typing import ( Dtype, Scalar, @@ -337,19 +339,13 @@ def _str_startswith(self, pat: str, na=None): result = pc.starts_with(self._pa_array, pattern=pat) if not isna(na): result = result.fill_null(na) - result = self._result_converter(result) - if not isna(na): - result[isna(result)] = bool(na) - return result + return self._result_converter(result) def _str_endswith(self, pat: str, na=None): result = pc.ends_with(self._pa_array, pattern=pat) if not isna(na): result = result.fill_null(na) - result = self._result_converter(result) - if not isna(na): - result[isna(result)] = bool(na) - return result + return self._result_converter(result) def _str_replace( self, @@ -368,6 +364,12 @@ def _str_replace( result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n) return type(self)(result) + def _str_repeat(self, repeats: int | Sequence[int]): + if not isinstance(repeats, int): + return super()._str_repeat(repeats) + else: + return type(self)(pc.binary_repeat(self._pa_array, repeats)) + def _str_match( self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None ): @@ -382,6 +384,19 @@ def _str_fullmatch( pat = f"{pat}$" return self._str_match(pat, case, flags, na) + def _str_slice( + self, start: int | 
None = None, stop: int | None = None, step: int | None = None + ): + if stop is None: + return super()._str_slice(start, stop, step) + if start is None: + start = 0 + if step is None: + step = 1 + return type(self)( + pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step) + ) + def _str_isalnum(self): result = pc.utf8_is_alnum(self._pa_array) return self._result_converter(result) From cf6100b2aa8f2210fc60c34865587c4c24d42582 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Thu, 7 Sep 2023 11:59:58 -0400 Subject: [PATCH 33/93] DEPR: DataFrameGroupBy.apply operating on the group keys (#54950) * DEPR: DataFrameGroupBy.apply operating on the group keys * fixups * Improvements * Add DataFrameGroupBy.resample to the whatsnew; mypy fixup * Ignore wrong parameter order * Ignore groupby.resample in docstring validation * Fixup docstring --- doc/source/user_guide/cookbook.rst | 4 +- doc/source/user_guide/groupby.rst | 14 +- doc/source/whatsnew/v0.14.0.rst | 21 +- doc/source/whatsnew/v0.18.1.rst | 93 ++++- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/core/frame.py | 26 +- pandas/core/groupby/groupby.py | 134 ++++++-- pandas/core/resample.py | 48 ++- pandas/core/reshape/pivot.py | 4 +- pandas/tests/extension/base/groupby.py | 8 +- pandas/tests/frame/test_stack_unstack.py | 4 +- pandas/tests/groupby/aggregate/test_other.py | 8 +- pandas/tests/groupby/test_apply.py | 319 ++++++++++++------ pandas/tests/groupby/test_apply_mutate.py | 32 +- pandas/tests/groupby/test_categorical.py | 13 +- pandas/tests/groupby/test_counting.py | 4 +- pandas/tests/groupby/test_function.py | 6 +- pandas/tests/groupby/test_groupby.py | 67 ++-- pandas/tests/groupby/test_groupby_dropna.py | 4 +- pandas/tests/groupby/test_groupby_subclass.py | 8 +- pandas/tests/groupby/test_grouping.py | 4 +- pandas/tests/groupby/test_timegrouper.py | 19 +- pandas/tests/groupby/test_value_counts.py | 9 +- .../tests/groupby/transform/test_transform.py | 12 +- pandas/tests/resample/test_datetime_index.py | 20 +- pandas/tests/resample/test_resample_api.py | 4 +- .../tests/resample/test_resampler_grouper.py | 71 +++- pandas/tests/resample/test_time_grouper.py | 14 +- pandas/tests/window/test_groupby.py | 88 +++-- scripts/validate_unwanted_patterns.py | 1 + 30 files changed, 767 insertions(+), 294 deletions(-) diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index c0d2a14507383..002e88533ab93 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -459,7 +459,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to df # List the size of the animals with the highest weight. 
- df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()]) + df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()], include_groups=False) `Using get_group `__ @@ -482,7 +482,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to return pd.Series(["L", avg_weight, True], index=["size", "weight", "adult"]) - expected_df = gb.apply(GrowUp) + expected_df = gb.apply(GrowUp, include_groups=False) expected_df `Expanding apply diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index c28123cec4491..5dd14e243fbb3 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -420,6 +420,12 @@ This is mainly syntactic sugar for the alternative, which is much more verbose: Additionally, this method avoids recomputing the internal grouping information derived from the passed key. +You can also include the grouping columns if you want to operate on them. + +.. ipython:: python + + grouped[["A", "B"]].sum() + .. _groupby.iterating-label: Iterating through groups @@ -1053,7 +1059,7 @@ missing values with the ``ffill()`` method. ).set_index("date") df_re - df_re.groupby("group").resample("1D").ffill() + df_re.groupby("group").resample("1D", include_groups=False).ffill() .. _groupby.filter: @@ -1219,13 +1225,13 @@ the argument ``group_keys`` which defaults to ``True``. Compare .. ipython:: python - df.groupby("A", group_keys=True).apply(lambda x: x) + df.groupby("A", group_keys=True).apply(lambda x: x, include_groups=False) with .. ipython:: python - df.groupby("A", group_keys=False).apply(lambda x: x) + df.groupby("A", group_keys=False).apply(lambda x: x, include_groups=False) Numba Accelerated Routines @@ -1709,7 +1715,7 @@ column index name will be used as the name of the inserted column: result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()} return pd.Series(result, name="metrics") - result = df.groupby("a").apply(compute_metrics) + result = df.groupby("a").apply(compute_metrics, include_groups=False) result diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst index 92c37243b7e81..9c537b3a48c74 100644 --- a/doc/source/whatsnew/v0.14.0.rst +++ b/doc/source/whatsnew/v0.14.0.rst @@ -328,13 +328,24 @@ More consistent behavior for some groupby methods: - groupby ``head`` and ``tail`` now act more like ``filter`` rather than an aggregation: - .. ipython:: python + .. code-block:: ipython - df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) - g = df.groupby('A') - g.head(1) # filters DataFrame + In [1]: df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) - g.apply(lambda x: x.head(1)) # used to simply fall-through + In [2]: g = df.groupby('A') + + In [3]: g.head(1) # filters DataFrame + Out[3]: + A B + 0 1 2 + 2 5 6 + + In [4]: g.apply(lambda x: x.head(1)) # used to simply fall-through + Out[4]: + A B + A + 1 0 1 2 + 5 2 5 6 - groupby head and tail respect column selection: diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst index 7d9008fdbdecd..ee6a60144bc35 100644 --- a/doc/source/whatsnew/v0.18.1.rst +++ b/doc/source/whatsnew/v0.18.1.rst @@ -77,9 +77,52 @@ Previously you would have to do this to get a rolling window mean per-group: df = pd.DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) df -.. ipython:: python +.. 
code-block:: ipython - df.groupby("A").apply(lambda x: x.rolling(4).B.mean()) + In [1]: df.groupby("A").apply(lambda x: x.rolling(4).B.mean()) + Out[1]: + A + 1 0 NaN + 1 NaN + 2 NaN + 3 1.5 + 4 2.5 + 5 3.5 + 6 4.5 + 7 5.5 + 8 6.5 + 9 7.5 + 10 8.5 + 11 9.5 + 12 10.5 + 13 11.5 + 14 12.5 + 15 13.5 + 16 14.5 + 17 15.5 + 18 16.5 + 19 17.5 + 2 20 NaN + 21 NaN + 22 NaN + 23 21.5 + 24 22.5 + 25 23.5 + 26 24.5 + 27 25.5 + 28 26.5 + 29 27.5 + 30 28.5 + 31 29.5 + 3 32 NaN + 33 NaN + 34 NaN + 35 33.5 + 36 34.5 + 37 35.5 + 38 36.5 + 39 37.5 + Name: B, dtype: float64 Now you can do: @@ -101,15 +144,53 @@ For ``.resample(..)`` type of operations, previously you would have to: df -.. ipython:: python +.. code-block:: ipython - df.groupby("group").apply(lambda x: x.resample("1D").ffill()) + In[1]: df.groupby("group").apply(lambda x: x.resample("1D").ffill()) + Out[1]: + group val + group date + 1 2016-01-03 1 5 + 2016-01-04 1 5 + 2016-01-05 1 5 + 2016-01-06 1 5 + 2016-01-07 1 5 + 2016-01-08 1 5 + 2016-01-09 1 5 + 2016-01-10 1 6 + 2 2016-01-17 2 7 + 2016-01-18 2 7 + 2016-01-19 2 7 + 2016-01-20 2 7 + 2016-01-21 2 7 + 2016-01-22 2 7 + 2016-01-23 2 7 + 2016-01-24 2 8 Now you can do: -.. ipython:: python +.. code-block:: ipython - df.groupby("group").resample("1D").ffill() + In[1]: df.groupby("group").resample("1D").ffill() + Out[1]: + group val + group date + 1 2016-01-03 1 5 + 2016-01-04 1 5 + 2016-01-05 1 5 + 2016-01-06 1 5 + 2016-01-07 1 5 + 2016-01-08 1 5 + 2016-01-09 1 5 + 2016-01-10 1 6 + 2 2016-01-17 2 7 + 2016-01-18 2 7 + 2016-01-19 2 7 + 2016-01-20 2 7 + 2016-01-21 2 7 + 2016-01-22 2 7 + 2016-01-23 2 7 + 2016-01-24 2 8 .. _whatsnew_0181.enhancements.method_chain: diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 4f38d420a53b4..7bb4aaec0dd7c 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -146,12 +146,12 @@ Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_pickle` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`) - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. 
To opt in to the future version, use ``pd.set_option("future.downcasting", True)`` (:issue:`53656`) +- Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) - Deprecated not passing a tuple to :class:`DataFrameGroupBy.get_group` or :class:`SeriesGroupBy.get_group` when grouping by a length-1 list-like (:issue:`25971`) - Deprecated strings ``S``, ``U``, and ``N`` denoting units in :func:`to_timedelta` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`52536`) - Deprecated the extension test classes ``BaseNoReduceTests``, ``BaseBooleanReduceTests``, and ``BaseNumericReduceTests``, use ``BaseReduceTests`` instead (:issue:`54663`) -- .. --------------------------------------------------------------------------- .. _whatsnew_220.performance: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a731cdbf99b0e..f1fc63bc4b1ea 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8869,20 +8869,20 @@ def update( >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) - >>> df.groupby("Animal", group_keys=True).apply(lambda x: x) - Animal Max Speed + >>> df.groupby("Animal", group_keys=True)[['Max Speed']].apply(lambda x: x) + Max Speed Animal - Falcon 0 Falcon 380.0 - 1 Falcon 370.0 - Parrot 2 Parrot 24.0 - 3 Parrot 26.0 - - >>> df.groupby("Animal", group_keys=False).apply(lambda x: x) - Animal Max Speed - 0 Falcon 380.0 - 1 Falcon 370.0 - 2 Parrot 24.0 - 3 Parrot 26.0 + Falcon 0 380.0 + 1 370.0 + Parrot 2 24.0 + 3 26.0 + + >>> df.groupby("Animal", group_keys=False)[['Max Speed']].apply(lambda x: x) + Max Speed + 0 380.0 + 1 370.0 + 2 24.0 + 3 26.0 """ ) ) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 43d200027220b..e6dd6a990d285 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -180,6 +180,19 @@ class providing the base-class of operations. A callable that takes a {input} as its first argument, and returns a dataframe, a series or a scalar. In addition the callable may take positional and keyword arguments. + include_groups : bool, default True + When True, will attempt to apply ``func`` to the groupings in + the case that they are columns of the DataFrame. If this raises a + TypeError, the result will be computed with the groupings excluded. + When False, the groupings will be excluded when applying ``func``. + + .. versionadded:: 2.2.0 + + .. deprecated:: 2.2.0 + + Setting include_groups to True is deprecated. Only the value + False will be allowed in a future version of pandas. + args, kwargs : tuple and dict Optional positional and keyword arguments to pass to ``func``. @@ -272,7 +285,7 @@ class providing the base-class of operations. 
each group together into a Series, including setting the index as appropriate: - >>> g1.apply(lambda x: x.C.max() - x.B.min()) + >>> g1.apply(lambda x: x.C.max() - x.B.min(), include_groups=False) A a 5 b 2 @@ -1748,7 +1761,7 @@ def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): input="dataframe", examples=_apply_docs["dataframe_examples"] ) ) - def apply(self, func, *args, **kwargs) -> NDFrameT: + def apply(self, func, *args, include_groups: bool = True, **kwargs) -> NDFrameT: orig_func = func func = com.is_builtin_func(func) if orig_func != func: @@ -1781,10 +1794,25 @@ def f(g): else: f = func + if not include_groups: + return self._python_apply_general(f, self._obj_with_exclusions) + # ignore SettingWithCopy here in case the user mutates with option_context("mode.chained_assignment", None): try: result = self._python_apply_general(f, self._selected_obj) + if ( + not isinstance(self.obj, Series) + and self._selection is None + and self._selected_obj.shape != self._obj_with_exclusions.shape + ): + warnings.warn( + message=_apply_groupings_depr.format( + type(self).__name__, "apply" + ), + category=FutureWarning, + stacklevel=find_stack_level(), + ) except TypeError: # gh-20949 # try again, with .apply acting as a filtering @@ -3520,7 +3548,7 @@ def describe( return result @final - def resample(self, rule, *args, **kwargs) -> Resampler: + def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resampler: """ Provide resampling when using a TimeGrouper. @@ -3534,7 +3562,23 @@ def resample(self, rule, *args, **kwargs) -> Resampler: ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. - *args, **kwargs + *args + Possible arguments are `how`, `fill_method`, `limit`, `kind` and + `on`, and other arguments of `TimeGrouper`. + include_groups : bool, default True + When True, will attempt to include the groupings in the operation in + the case that they are columns of the DataFrame. If this raises a + TypeError, the result will be computed with the groupings excluded. + When False, the groupings will be excluded when applying ``func``. + + .. versionadded:: 2.2.0 + + .. deprecated:: 2.2.0 + + Setting include_groups to True is deprecated. Only the value + False will be allowed in a future version of pandas. + + **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. @@ -3570,59 +3614,71 @@ def resample(self, rule, *args, **kwargs) -> Resampler: Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. - >>> df.groupby('a').resample('3min').sum() - a b + >>> df.groupby('a').resample('3min', include_groups=False).sum() + b a - 0 2000-01-01 00:00:00 0 2 - 2000-01-01 00:03:00 0 1 - 5 2000-01-01 00:00:00 5 1 + 0 2000-01-01 00:00:00 2 + 2000-01-01 00:03:00 1 + 5 2000-01-01 00:00:00 1 Upsample the series into 30 second bins. - >>> df.groupby('a').resample('30s').sum() - a b + >>> df.groupby('a').resample('30s', include_groups=False).sum() + b a - 0 2000-01-01 00:00:00 0 1 - 2000-01-01 00:00:30 0 0 - 2000-01-01 00:01:00 0 1 - 2000-01-01 00:01:30 0 0 - 2000-01-01 00:02:00 0 0 - 2000-01-01 00:02:30 0 0 - 2000-01-01 00:03:00 0 1 - 5 2000-01-01 00:02:00 5 1 + 0 2000-01-01 00:00:00 1 + 2000-01-01 00:00:30 0 + 2000-01-01 00:01:00 1 + 2000-01-01 00:01:30 0 + 2000-01-01 00:02:00 0 + 2000-01-01 00:02:30 0 + 2000-01-01 00:03:00 1 + 5 2000-01-01 00:02:00 1 Resample by month. 
Values are assigned to the month of the period. - >>> df.groupby('a').resample('M').sum() - a b + >>> df.groupby('a').resample('M', include_groups=False).sum() + b a - 0 2000-01-31 0 3 - 5 2000-01-31 5 1 + 0 2000-01-31 3 + 5 2000-01-31 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. - >>> df.groupby('a').resample('3min', closed='right').sum() - a b + >>> ( + ... df.groupby('a') + ... .resample('3min', closed='right', include_groups=False) + ... .sum() + ... ) + b a - 0 1999-12-31 23:57:00 0 1 - 2000-01-01 00:00:00 0 2 - 5 2000-01-01 00:00:00 5 1 + 0 1999-12-31 23:57:00 1 + 2000-01-01 00:00:00 2 + 5 2000-01-01 00:00:00 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. - >>> df.groupby('a').resample('3min', closed='right', label='right').sum() - a b + >>> ( + ... df.groupby('a') + ... .resample('3min', closed='right', label='right', include_groups=False) + ... .sum() + ... ) + b a - 0 2000-01-01 00:00:00 0 1 - 2000-01-01 00:03:00 0 2 - 5 2000-01-01 00:03:00 5 1 + 0 2000-01-01 00:00:00 1 + 2000-01-01 00:03:00 2 + 5 2000-01-01 00:03:00 1 """ from pandas.core.resample import get_resampler_for_grouping - return get_resampler_for_grouping(self, rule, *args, **kwargs) + # mypy flags that include_groups could be specified via `*args` or `**kwargs` + # GH#54961 would resolve. + return get_resampler_for_grouping( # type: ignore[misc] + self, rule, *args, include_groups=include_groups, **kwargs + ) @final def rolling(self, *args, **kwargs) -> RollingGroupby: @@ -5728,3 +5784,13 @@ def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiInde mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None]) return mi + + +# GH#7155 +_apply_groupings_depr = ( + "{}.{} operated on the grouping columns. This behavior is deprecated, " + "and in a future version of pandas the grouping columns will be excluded " + "from the operation. Either pass `include_groups=False` to exclude the " + "groupings or explicitly select the grouping columns after groupby to silence " + "this warning." 
+) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5ff18d8a25e36..9605bf154a8b7 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -32,7 +32,10 @@ Substitution, doc, ) -from pandas.util._exceptions import find_stack_level +from pandas.util._exceptions import ( + find_stack_level, + rewrite_warning, +) from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -57,6 +60,7 @@ from pandas.core.groupby.groupby import ( BaseGroupBy, GroupBy, + _apply_groupings_depr, _pipe_template, get_groupby, ) @@ -163,6 +167,7 @@ def __init__( gpr_index: Index, group_keys: bool = False, selection=None, + include_groups: bool = True, ) -> None: self._timegrouper = timegrouper self.keys = None @@ -171,6 +176,7 @@ def __init__( self.kind = kind self.group_keys = group_keys self.as_index = True + self.include_groups = include_groups self.obj, self.ax, self._indexer = self._timegrouper._set_grouper( self._convert_obj(obj), sort=True, gpr_index=gpr_index @@ -444,7 +450,9 @@ def _groupby_and_aggregate(self, how, *args, **kwargs): # a DataFrame column, but aggregate_item_by_item operates column-wise # on Series, raising AttributeError or KeyError # (depending on whether the column lookup uses getattr/__getitem__) - result = grouped.apply(how, *args, **kwargs) + result = _apply( + grouped, how, *args, include_groups=self.include_groups, **kwargs + ) except ValueError as err: if "Must produce aggregated value" in str(err): @@ -456,15 +464,21 @@ def _groupby_and_aggregate(self, how, *args, **kwargs): # we have a non-reducing function # try to evaluate - result = grouped.apply(how, *args, **kwargs) + result = _apply( + grouped, how, *args, include_groups=self.include_groups, **kwargs + ) return self._wrap_result(result) - def _get_resampler_for_grouping(self, groupby: GroupBy, key): + def _get_resampler_for_grouping( + self, groupby: GroupBy, key, include_groups: bool = True + ): """ Return the correct class for resampling with groupby. 
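
        ``include_groups`` is threaded through to the resampler and, from
        there, to the grouped ``apply`` call, so the resample-on-groupby
        path exposes the same deprecation switch as
        ``DataFrameGroupBy.apply``.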
""" - return self._resampler_for_grouping(groupby=groupby, key=key, parent=self) + return self._resampler_for_grouping( + groupby=groupby, key=key, parent=self, include_groups=include_groups + ) def _wrap_result(self, result): """ @@ -1590,6 +1604,7 @@ def __init__( groupby: GroupBy, key=None, selection: IndexLabel | None = None, + include_groups: bool = False, ) -> None: # reached via ._gotitem and _get_resampler_for_grouping @@ -1612,6 +1627,7 @@ def __init__( self.ax = parent.ax self.obj = parent.obj + self.include_groups = include_groups @no_type_check def _apply(self, f, *args, **kwargs): @@ -1628,7 +1644,7 @@ def func(x): return x.apply(f, *args, **kwargs) - result = self._groupby.apply(func) + result = _apply(self._groupby, func, include_groups=self.include_groups) return self._wrap_result(result) _upsample = _apply @@ -2003,6 +2019,7 @@ def get_resampler_for_grouping( limit: int | None = None, kind=None, on=None, + include_groups: bool = True, **kwargs, ) -> Resampler: """ @@ -2011,7 +2028,9 @@ def get_resampler_for_grouping( # .resample uses 'on' similar to how .groupby uses 'key' tg = TimeGrouper(freq=rule, key=on, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) - return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key) + return resampler._get_resampler_for_grouping( + groupby=groupby, include_groups=include_groups, key=tg.key + ) class TimeGrouper(Grouper): @@ -2789,3 +2808,18 @@ def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None: category=FutureWarning, stacklevel=find_stack_level(), ) + + +def _apply( + grouped: GroupBy, how: Callable, *args, include_groups: bool, **kwargs +) -> DataFrame: + # GH#7155 - rewrite warning to appear as if it came from `.resample` + target_message = "DataFrameGroupBy.apply operated on the grouping columns" + new_message = _apply_groupings_depr.format("DataFrameGroupBy", "resample") + with rewrite_warning( + target_message=target_message, + target_category=FutureWarning, + new_message=new_message, + ): + result = grouped.apply(how, *args, include_groups=include_groups, **kwargs) + return result diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 924b56f7a14d5..e8ca520e7b420 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -449,7 +449,7 @@ def _all_key(): return (margins_name,) + ("",) * (len(cols) - 1) if len(rows) > 0: - margin = data[rows].groupby(rows, observed=observed).apply(aggfunc) + margin = data.groupby(rows, observed=observed)[rows].apply(aggfunc) all_key = _all_key() table[all_key] = margin result = table @@ -467,7 +467,7 @@ def _all_key(): margin_keys = table.columns if len(cols): - row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc) + row_margin = data.groupby(cols, observed=observed)[cols].apply(aggfunc) else: row_margin = Series(np.nan, index=result.columns) diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index 489f43729a004..5c21c4f7137a5 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -108,9 +108,13 @@ def test_groupby_extension_transform(self, data_for_grouping): def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) - df.groupby("B", group_keys=False).apply(groupby_apply_op) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + 
df.groupby("B", group_keys=False).apply(groupby_apply_op) df.groupby("B", group_keys=False).A.apply(groupby_apply_op) - df.groupby("A", group_keys=False).apply(groupby_apply_op) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("A", group_keys=False).apply(groupby_apply_op) df.groupby("A", group_keys=False).B.apply(groupby_apply_op) def test_groupby_apply_identity(self, data_for_grouping): diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index dbd1f96fc17c9..b54a795af4fdc 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1767,7 +1767,9 @@ def test_unstack_bug(self, future_stack): } ) - result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) unstacked = result.unstack() restacked = unstacked.stack(future_stack=future_stack) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 9d3ebbd3672ae..7ea107f254104 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -499,13 +499,17 @@ def test_agg_timezone_round_trip(): assert ts == grouped.first()["B"].iloc[0] # GH#27110 applying iloc should return a DataFrame - assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1] ts = df["B"].iloc[2] assert ts == grouped.last()["B"].iloc[0] # GH#27110 applying iloc should return a DataFrame - assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1] def test_sum_uint64_overflow(): diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index d04ee7cec0db1..abcb9f68e0f5c 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -28,7 +28,9 @@ def test_apply_func_that_appends_group_to_list_without_copy(): def store(group): groups.append(group) - df.groupby("index").apply(store) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("index").apply(store) expected_value = DataFrame( {"index": [0] * 10, 0: [1] * 10}, index=pd.RangeIndex(0, 100, 10) ) @@ -71,9 +73,11 @@ def test_apply_issues(): ["2011.05.16", "2011.05.17", "2011.05.18"], dtype=object, name="date" ) expected = Series(["00:00", "02:00", "02:00"], index=exp_idx) - result = df.groupby("date", group_keys=False).apply( - lambda x: x["time"][x["value"].idxmax()] - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("date", group_keys=False).apply( + lambda x: x["time"][x["value"].idxmax()] + ) tm.assert_series_equal(result, expected) @@ -179,7 +183,9 @@ def f_constant_df(group): for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]: del names[:] - df.groupby("a", group_keys=False).apply(func) + msg = "DataFrameGroupBy.apply 
operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("a", group_keys=False).apply(func) assert names == group_names @@ -197,9 +203,11 @@ def test_group_apply_once_per_group2(capsys): index=["0", "2", "4", "6", "8", "10", "12", "14"], ) - df.groupby("group_by_column", group_keys=False).apply( - lambda df: print("function_called") - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("group_by_column", group_keys=False).apply( + lambda df: print("function_called") + ) result = capsys.readouterr().out.count("function_called") # If `groupby` behaves unexpectedly, this test will break @@ -219,8 +227,11 @@ def slow(group): def fast(group): return group.copy() - fast_df = df.groupby("A", group_keys=False).apply(fast) - slow_df = df.groupby("A", group_keys=False).apply(slow) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + fast_df = df.groupby("A", group_keys=False).apply(fast) + with tm.assert_produces_warning(FutureWarning, match=msg): + slow_df = df.groupby("A", group_keys=False).apply(slow) tm.assert_frame_equal(fast_df, slow_df) @@ -242,7 +253,9 @@ def test_groupby_apply_identity_maybecopy_index_identical(func): df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) - result = df.groupby("g", group_keys=False).apply(func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("g", group_keys=False).apply(func) tm.assert_frame_equal(result, df) @@ -285,8 +298,11 @@ def test_groupby_as_index_apply(): tm.assert_index_equal(res_as, exp) tm.assert_index_equal(res_not_as, exp) - res_as_apply = g_as.apply(lambda x: x.head(2)).index - res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + res_as_apply = g_as.apply(lambda x: x.head(2)).index + with tm.assert_produces_warning(FutureWarning, match=msg): + res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index # apply doesn't maintain the original ordering # changed in GH5610 as the as_index=False returns a MI here @@ -299,7 +315,9 @@ def test_groupby_as_index_apply(): ind = Index(list("abcde")) df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind) - res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index tm.assert_index_equal(res, ind) @@ -328,13 +346,19 @@ def desc3(group): # weirdo return result - result = grouped.apply(desc) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(desc) assert result.index.names == ("A", "B", "stat") - result2 = grouped.apply(desc2) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = grouped.apply(desc2) assert result2.index.names == ("A", "B", "stat") - result3 = grouped.apply(desc3) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result3 = 
grouped.apply(desc3) assert result3.index.names == ("A", "B", None) @@ -364,7 +388,9 @@ def test_apply_series_yield_constant(df): def test_apply_frame_yield_constant(df): # GH13568 - result = df.groupby(["A", "B"]).apply(len) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["A", "B"]).apply(len) assert isinstance(result, Series) assert result.name is None @@ -375,7 +401,9 @@ def test_apply_frame_yield_constant(df): def test_apply_frame_to_series(df): grouped = df.groupby(["A", "B"]) - result = grouped.apply(len) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(len) expected = grouped.count()["C"] tm.assert_index_equal(result.index, expected.index) tm.assert_numpy_array_equal(result.values, expected.values) @@ -384,7 +412,9 @@ def test_apply_frame_to_series(df): def test_apply_frame_not_as_index_column_name(df): # GH 35964 - path within _wrap_applied_output not hit by a test grouped = df.groupby(["A", "B"], as_index=False) - result = grouped.apply(len) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(len) expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D") # TODO(GH#34306): Use assert_frame_equal when column name is not np.nan tm.assert_index_equal(result.index, expected.index) @@ -407,7 +437,9 @@ def trans2(group): } ) - result = df.groupby("A").apply(trans) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(trans) exp = df.groupby("A")["C"].apply(trans2) tm.assert_series_equal(result, exp, check_names=False) assert result.name == "C" @@ -436,7 +468,9 @@ def test_apply_chunk_view(group_keys): # Low level tinkering could be unsafe, make sure not df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)}) - result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2]) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2]) expected = df.take([0, 1, 3, 4, 6, 7]) if group_keys: expected.index = MultiIndex.from_arrays( @@ -457,7 +491,9 @@ def test_apply_no_name_column_conflict(): # it works! 
#2605 grouped = df.groupby(["name", "name2"]) - grouped.apply(lambda x: x.sort_values("value", inplace=True)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped.apply(lambda x: x.sort_values("value", inplace=True)) def test_apply_typecast_fail(): @@ -474,7 +510,9 @@ def f(group): group["v2"] = (v - v.min()) / (v.max() - v.min()) return group - result = df.groupby("d", group_keys=False).apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("d", group_keys=False).apply(f) expected = df.copy() expected["v2"] = np.tile([0.0, 0.5, 1], 2) @@ -498,7 +536,9 @@ def f(group): group["v2"] = (v - v.min()) / (v.max() - v.min()) return group - result = df.groupby("d", group_keys=False).apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("d", group_keys=False).apply(f) expected = df.copy() expected["v2"] = np.tile([0.0, 0.5, 1], 2) @@ -536,8 +576,11 @@ def filt2(x): else: return x[x.category == "c"] - expected = data.groupby("id_field").apply(filt1) - result = data.groupby("id_field").apply(filt2) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = data.groupby("id_field").apply(filt1) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = data.groupby("id_field").apply(filt2) tm.assert_frame_equal(result, expected) @@ -556,7 +599,9 @@ def test_apply_with_duplicated_non_sorted_axis(test_series): expected = ser.sort_index() tm.assert_series_equal(result, expected) else: - result = df.groupby("Y", group_keys=False).apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("Y", group_keys=False).apply(lambda x: x) # not expecting the order to remain the same for duplicated axis result = result.sort_values("Y") @@ -601,7 +646,9 @@ def f(g): g["value3"] = g["value1"] * 2 return g - result = grouped.apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(f) assert "value3" in result @@ -615,9 +662,13 @@ def test_apply_numeric_coercion_when_datetime(): df = DataFrame( {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]} ) - expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) df.Date = pd.to_datetime(df.Date) - result = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) tm.assert_series_equal(result["Str"], expected["Str"]) # GH 15421 @@ -628,7 +679,9 @@ def test_apply_numeric_coercion_when_datetime(): def get_B(g): return g.iloc[0][["B"]] - result = df.groupby("A").apply(get_B)["B"] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(get_B)["B"] expected = df.B expected.index = df.A 
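    # Warning-free spellings of the same lookup (a sketch of the migration
    # the message suggests): df.groupby("A")[["B"]].apply(get_B)["B"], or
    # df.groupby("A").apply(get_B, include_groups=False)["B"].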
tm.assert_series_equal(result, expected) @@ -653,8 +706,11 @@ def predictions(tool): ) df2 = df1.copy() df2.oTime = pd.to_datetime(df2.oTime) - expected = df1.groupby("Key").apply(predictions).p1 - result = df2.groupby("Key").apply(predictions).p1 + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df1.groupby("Key").apply(predictions).p1 + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df2.groupby("Key").apply(predictions).p1 tm.assert_series_equal(expected, result) @@ -669,11 +725,13 @@ def test_apply_aggregating_timedelta_and_datetime(): } ) df["time_delta_zero"] = df.datetime - df.datetime - result = df.groupby("clientid").apply( - lambda ddf: Series( - {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()} + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("clientid").apply( + lambda ddf: Series( + {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()} + ) ) - ) expected = DataFrame( { "clientid": ["A", "B", "C"], @@ -716,11 +774,15 @@ def func_with_no_date(batch): def func_with_date(batch): return Series({"b": datetime(2015, 1, 1), "c": 2}) - dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date) dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1]) dfg_no_conversion_expected.index.name = "a" - dfg_conversion = df.groupby(by=["a"]).apply(func_with_date) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + dfg_conversion = df.groupby(by=["a"]).apply(func_with_date) dfg_conversion_expected = DataFrame( {"b": pd.Timestamp(2015, 1, 1).as_unit("ns"), "c": 2}, index=[1] ) @@ -764,7 +826,9 @@ def test_groupby_apply_all_none(): def test_func(x): pass - result = test_df.groupby("groups").apply(test_func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = test_df.groupby("groups").apply(test_func) expected = DataFrame() tm.assert_frame_equal(result, expected) @@ -779,8 +843,11 @@ def test_func(x): return None return x.iloc[[0, -1]] - result1 = test_df1.groupby("groups").apply(test_func) - result2 = test_df2.groupby("groups").apply(test_func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = test_df1.groupby("groups").apply(test_func) + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = test_df2.groupby("groups").apply(test_func) index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None]) index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None]) expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1) @@ -793,7 +860,9 @@ def test_groupby_apply_return_empty_chunk(): # GH 22221: apply filter which returns some empty groups df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]}) groups = df.groupby("group") - result = groups.apply(lambda group: group[group.value != 1]["value"]) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = groups.apply(lambda 
group: group[group.value != 1]["value"]) expected = Series( [0], name="value", @@ -820,7 +889,9 @@ def test_apply_with_mixed_types(): def test_func_returns_object(): # GH 28652 df = DataFrame({"a": [1, 2]}, index=Index([1, 2])) - result = df.groupby("a").apply(lambda g: g.index) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("a").apply(lambda g: g.index) expected = Series([Index([1]), Index([2])], index=Index([1, 2], name="a")) tm.assert_series_equal(result, expected) @@ -837,7 +908,9 @@ def test_apply_datetime_issue(group_column_dtlike): # standard int values in range(len(num_columns)) df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]}) - result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42])) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42])) expected = DataFrame( ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42] @@ -876,7 +949,9 @@ def test_apply_series_return_dataframe_groups(): def most_common_values(df): return Series({c: s.value_counts().index[0] for c, s in df.items()}) - result = tdf.groupby("day").apply(most_common_values)["userId"] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = tdf.groupby("day").apply(most_common_values)["userId"] expected = Series( ["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId" ) @@ -917,7 +992,9 @@ def test_groupby_apply_datetime_result_dtypes(): ], columns=["observation", "color", "mood", "intensity", "score"], ) - result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes expected = Series( [np.dtype("datetime64[ns]"), object, object, np.int64, object], index=["observation", "color", "mood", "intensity", "score"], @@ -937,7 +1014,9 @@ def test_groupby_apply_datetime_result_dtypes(): def test_apply_index_has_complex_internals(index): # GH 31248 df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index) - result = df.groupby("group", group_keys=False).apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("group", group_keys=False).apply(lambda x: x) tm.assert_frame_equal(result, df) @@ -960,7 +1039,9 @@ def test_apply_index_has_complex_internals(index): def test_apply_function_returns_non_pandas_non_scalar(function, expected_values): # GH 31441 df = DataFrame(["A", "A", "B", "B"], columns=["groups"]) - result = df.groupby("groups").apply(function) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("groups").apply(function) expected = Series(expected_values, index=Index(["A", "B"], name="groups")) tm.assert_series_equal(result, expected) @@ -972,7 +1053,9 @@ def fct(group): df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]}) - result = df.groupby("A").apply(fct) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = 
df.groupby("A").apply(fct) expected = Series( [[1.0, 2.0], [3.0], [np.nan]], index=Index(["a", "b", "none"], name="A") ) @@ -983,7 +1066,9 @@ def fct(group): def test_apply_function_index_return(function): # GH: 22541 df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) - result = df.groupby("id").apply(function) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("id").apply(function) expected = Series( [Index([0, 4, 7, 9]), Index([1, 2, 3, 5]), Index([6, 8])], index=Index([1, 2, 3], name="id"), @@ -1019,7 +1104,9 @@ def test_apply_result_type(group_keys, udf): # We'd like to control whether the group keys end up in the index # regardless of whether the UDF happens to be a transform. df = DataFrame({"A": ["a", "b"], "B": [1, 2]}) - df_result = df.groupby("A", group_keys=group_keys).apply(udf) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df_result = df.groupby("A", group_keys=group_keys).apply(udf) series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf) if group_keys: @@ -1034,8 +1121,11 @@ def test_result_order_group_keys_false(): # GH 34998 # apply result order should not depend on whether index is the same or just equal df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]}) - result = df.groupby("A", group_keys=False).apply(lambda x: x) - expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A", group_keys=False).apply(lambda x: x) + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy()) tm.assert_frame_equal(result, expected) @@ -1047,8 +1137,15 @@ def test_apply_with_timezones_aware(): df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) - result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) - result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = df1.groupby("x", group_keys=False).apply( + lambda df: df[["x", "y"]].copy() + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = df2.groupby("x", group_keys=False).apply( + lambda df: df[["x", "y"]].copy() + ) tm.assert_frame_equal(result1, result2) @@ -1103,7 +1200,9 @@ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp(): ) grp = df.groupby(["A", "B"]) - result = grp.apply(lambda x: x.head(1)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grp.apply(lambda x: x.head(1)) expected = df.iloc[[0, 2, 3]] expected = expected.reset_index() @@ -1151,7 +1250,9 @@ def test_apply_dropna_with_indexed_same(dropna): }, index=list("xxyxz"), ) - result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x) expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]] 
tm.assert_frame_equal(result, expected) @@ -1176,7 +1277,9 @@ def test_apply_dropna_with_indexed_same(dropna): def test_apply_as_index_constant_lambda(as_index, expected): # GH 13217 df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]}) - result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) tm.assert_equal(result, expected) @@ -1186,7 +1289,9 @@ def test_sort_index_groups(): {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]}, index=range(5), ) - result = df.groupby("C").apply(lambda x: x.A.sort_index()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("C").apply(lambda x: x.A.sort_index()) expected = Series( range(1, 6), index=MultiIndex.from_tuples( @@ -1206,9 +1311,11 @@ def test_positional_slice_groups_datetimelike(): "let": list("abcde"), } ) - result = expected.groupby( - [expected.let, expected.date.dt.date], group_keys=False - ).apply(lambda x: x.iloc[0:]) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = expected.groupby( + [expected.let, expected.date.dt.date], group_keys=False + ).apply(lambda x: x.iloc[0:]) tm.assert_frame_equal(result, expected) @@ -1251,24 +1358,29 @@ def test_apply_na(dropna): {"grp": [1, 1, 2, 2], "y": [1, 0, 2, 5], "z": [1, 2, np.nan, np.nan]} ) dfgrp = df.groupby("grp", dropna=dropna) - result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z")) - expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z")) + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1)) tm.assert_frame_equal(result, expected) def test_apply_empty_string_nan_coerce_bug(): # GH#24903 - result = ( - DataFrame( - { - "a": [1, 1, 2, 2], - "b": ["", "", "", ""], - "c": pd.to_datetime([1, 2, 3, 4], unit="s"), - } + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ( + DataFrame( + { + "a": [1, 1, 2, 2], + "b": ["", "", "", ""], + "c": pd.to_datetime([1, 2, 3, 4], unit="s"), + } + ) + .groupby(["a", "b"]) + .apply(lambda df: df.iloc[-1]) ) - .groupby(["a", "b"]) - .apply(lambda df: df.iloc[-1]) - ) expected = DataFrame( [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]], columns=["a", "b", "c"], @@ -1293,9 +1405,11 @@ def test_apply_index_key_error_bug(index_values): }, index=Index(["a2", "a3", "aa"], name="a"), ) - result = result.groupby("a").apply( - lambda df: Series([df["b"].mean()], index=["b_mean"]) - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = result.groupby("a").apply( + lambda df: Series([df["b"].mean()], index=["b_mean"]) + ) tm.assert_frame_equal(result, expected) @@ -1343,7 +1457,9 @@ def test_apply_index_key_error_bug(index_values): def test_apply_nonmonotonic_float_index(arg, idx): # GH 34455 expected = DataFrame({"col": arg}, index=idx) - result = 
expected.groupby("col", group_keys=False).apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = expected.groupby("col", group_keys=False).apply(lambda x: x) tm.assert_frame_equal(result, expected) @@ -1390,33 +1506,16 @@ def test_empty_df(method, op): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize( - "group_col", - [([0.0, np.nan, 0.0, 0.0]), ([np.nan, 0.0, 0.0, 0.0]), ([0, 0.0, 0.0, np.nan])], -) -def test_apply_inconsistent_output(group_col): - # GH 34478 - df = DataFrame({"group_col": group_col, "value_col": [2, 2, 2, 2]}) - - result = df.groupby("group_col").value_col.apply( - lambda x: x.value_counts().reindex(index=[1, 2, 3]) - ) - expected = Series( - [np.nan, 3.0, np.nan], - name="value_col", - index=MultiIndex.from_product([[0.0], [1, 2, 3]], names=["group_col", 0.0]), - ) - - tm.assert_series_equal(result, expected) - - -def test_apply_array_output_multi_getitem(): - # GH 18930 - df = DataFrame( - {"A": {"a": 1, "b": 2}, "B": {"a": 1, "b": 2}, "C": {"a": 1, "b": 2}} - ) - result = df.groupby("A")[["B", "C"]].apply(lambda x: np.array([0])) - expected = Series( - [np.array([0])] * 2, index=Index([1, 2], name="A"), name=("B", "C") - ) - tm.assert_series_equal(result, expected) +@pytest.mark.parametrize("include_groups", [True, False]) +def test_include_groups(include_groups): + # GH#7155 + df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]}) + gb = df.groupby("a") + warn = FutureWarning if include_groups else None + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(warn, match=msg): + result = gb.apply(lambda x: x.sum(), include_groups=include_groups) + expected = DataFrame({"a": [2, 2], "b": [7, 5]}, index=Index([1, 2], name="a")) + if not include_groups: + expected = expected[["b"]] + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_apply_mutate.py b/pandas/tests/groupby/test_apply_mutate.py index 9bc07b584e9d1..09d5e06bf6ddd 100644 --- a/pandas/tests/groupby/test_apply_mutate.py +++ b/pandas/tests/groupby/test_apply_mutate.py @@ -13,10 +13,16 @@ def test_group_by_copy(): } ).set_index("name") - grp_by_same_value = df.groupby(["age"], group_keys=False).apply(lambda group: group) - grp_by_copy = df.groupby(["age"], group_keys=False).apply( - lambda group: group.copy() - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + grp_by_same_value = df.groupby(["age"], group_keys=False).apply( + lambda group: group + ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + grp_by_copy = df.groupby(["age"], group_keys=False).apply( + lambda group: group.copy() + ) tm.assert_frame_equal(grp_by_same_value, grp_by_copy) @@ -47,8 +53,11 @@ def f_no_copy(x): x["rank"] = x.val.rank(method="min") return x.groupby("cat2")["rank"].min() - grpby_copy = df.groupby("cat1").apply(f_copy) - grpby_no_copy = df.groupby("cat1").apply(f_no_copy) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + grpby_copy = df.groupby("cat1").apply(f_copy) + with tm.assert_produces_warning(FutureWarning, match=msg): + grpby_no_copy = df.groupby("cat1").apply(f_no_copy) tm.assert_series_equal(grpby_copy, grpby_no_copy) @@ -58,8 +67,11 @@ def test_no_mutate_but_looks_like(): # second does not, but should 
yield the same results df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)}) - result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key) - result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key) + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key) tm.assert_series_equal(result1, result2) @@ -73,7 +85,9 @@ def fn(x): x.loc[x.index[-1], "col2"] = 0 return x.col2 - result = df.groupby(["col1"], as_index=False).apply(fn) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["col1"], as_index=False).apply(fn) expected = pd.Series( [1, 2, 0, 4, 5, 0], index=pd.MultiIndex.from_tuples( diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index f2d21c10f7a15..b11240c841420 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -124,7 +124,9 @@ def test_basic(): # TODO: split this test def f(x): return x.drop_duplicates("person_name").iloc[0] - result = g.apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = g.apply(f) expected = x.iloc[[0, 1]].copy() expected.index = Index([1, 2], name="person_id") expected["person_name"] = expected["person_name"].astype("object") @@ -329,7 +331,9 @@ def test_apply(ordered): # but for transform we should still get back the original index idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"]) expected = Series(1, index=idx) - result = grouped.apply(lambda x: 1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(lambda x: 1) tm.assert_series_equal(result, expected) @@ -2013,7 +2017,10 @@ def test_category_order_apply(as_index, sort, observed, method, index_kind, orde df["a2"] = df["a"] df = df.set_index(keys) gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) - op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True)) + warn = FutureWarning if method == "apply" and index_kind == "range" else None + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(warn, match=msg): + op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True)) if (method == "transform" or not as_index) and index_kind == "range": result = op_result["a"].cat.categories else: diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 25a4fd2550df6..16d7fe61b90ad 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -289,7 +289,9 @@ def test_count(): for key in ["1st", "2nd", ["1st", "2nd"]]: left = df.groupby(key).count() - right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1) tm.assert_frame_equal(left, right) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 
0abf6428730ff..287310a18c7df 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -95,10 +95,12 @@ def test_builtins_apply(keys, f): assert result.shape == (ngroups, 3), assert_msg npfunc = lambda x: getattr(np, fname)(x, axis=0) # numpy's equivalent function - expected = gb.apply(npfunc) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = gb.apply(npfunc) tm.assert_frame_equal(result, expected) - with tm.assert_produces_warning(None): + with tm.assert_produces_warning(FutureWarning, match=msg): expected2 = gb.apply(lambda x: npfunc(x)) tm.assert_frame_equal(result, expected2) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 999a03d18644d..fdd959f0e8754 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -150,7 +150,9 @@ def test_groupby_nonobject_dtype(mframe, df_mixed_floats): def max_value(group): return group.loc[group["value"].idxmax()] - applied = df.groupby("A").apply(max_value) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + applied = df.groupby("A").apply(max_value) result = applied.dtypes expected = df.dtypes tm.assert_series_equal(result, expected) @@ -171,7 +173,9 @@ def f_0(grp): return grp.iloc[0] expected = df.groupby("A").first()[["B"]] - result = df.groupby("A").apply(f_0)[["B"]] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(f_0)[["B"]] tm.assert_frame_equal(result, expected) def f_1(grp): @@ -179,9 +183,10 @@ def f_1(grp): return None return grp.iloc[0] - result = df.groupby("A").apply(f_1)[["B"]] - # Cast to avoid upcast when setting nan below - e = expected.copy().astype("float64") + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(f_1)[["B"]] + e = expected.copy() e.loc["Tiger"] = np.nan tm.assert_frame_equal(result, e) @@ -190,9 +195,10 @@ def f_2(grp): return None return grp.iloc[0] - result = df.groupby("A").apply(f_2)[["B"]] - # Explicit cast to float to avoid implicit cast when setting nan - e = expected.copy().astype({"B": "float"}) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(f_2)[["B"]] + e = expected.copy() e.loc["Pony"] = np.nan tm.assert_frame_equal(result, e) @@ -202,7 +208,9 @@ def f_3(grp): return None return grp.iloc[0] - result = df.groupby("A").apply(f_3)[["C"]] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(f_3)[["C"]] e = df.groupby("A").first()[["C"]] e.loc["Pony"] = pd.NaT tm.assert_frame_equal(result, e) @@ -213,7 +221,9 @@ def f_4(grp): return None return grp.iloc[0].loc["C"] - result = df.groupby("A").apply(f_4) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").apply(f_4) e = df.groupby("A").first()["C"].copy() e.loc["Pony"] = np.nan e.name = None @@ -392,8 +402,11 @@ def f3(x): depr_msg = "The behavior of array concatenation with empty entries is deprecated" # correct result - result1 = 
df.groupby("a").apply(f1) - result2 = df2.groupby("a").apply(f1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = df.groupby("a").apply(f1) + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = df2.groupby("a").apply(f1) tm.assert_frame_equal(result1, result2) # should fail (not the same number of levels) @@ -1322,11 +1335,15 @@ def summarize_random_name(df): # inconsistent. return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"]) - metrics = df.groupby("A").apply(summarize) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + metrics = df.groupby("A").apply(summarize) assert metrics.columns.name is None - metrics = df.groupby("A").apply(summarize, "metrics") + with tm.assert_produces_warning(FutureWarning, match=msg): + metrics = df.groupby("A").apply(summarize, "metrics") assert metrics.columns.name == "metrics" - metrics = df.groupby("A").apply(summarize_random_name) + with tm.assert_produces_warning(FutureWarning, match=msg): + metrics = df.groupby("A").apply(summarize_random_name) assert metrics.columns.name is None @@ -1619,7 +1636,9 @@ def test_dont_clobber_name_column(): {"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2} ) - result = df.groupby("key", group_keys=False).apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("key", group_keys=False).apply(lambda x: x) tm.assert_frame_equal(result, df) @@ -1693,7 +1712,9 @@ def freducex(x): grouped = df.groupby(grouper, group_keys=False) # make sure all these work - grouped.apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped.apply(f) grouped.aggregate(freduce) grouped.aggregate({"C": freduce, "D": freduce}) grouped.transform(f) @@ -1714,7 +1735,9 @@ def f(group): names.append(group.name) return group.copy() - df.groupby("a", sort=False, group_keys=False).apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("a", sort=False, group_keys=False).apply(f) expected_names = [0, 1, 2] assert names == expected_names @@ -1920,7 +1943,9 @@ def test_groupby_preserves_sort(sort_column, group_column): def test_sort(x): tm.assert_frame_equal(x, x.sort_values(by=sort_column)) - g.apply(test_sort) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + g.apply(test_sort) def test_pivot_table_values_key_error(): @@ -2102,7 +2127,9 @@ def test_empty_groupby_apply_nonunique_columns(): df[3] = df[3].astype(np.int64) df.columns = [0, 1, 2, 0] gb = df.groupby(df[1], group_keys=False) - res = gb.apply(lambda x: x) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = gb.apply(lambda x: x) assert (res.dtypes == df.dtypes).all() diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index 099e7bc3890d0..d82278c277d48 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -324,7 +324,9 @@ def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, df = 
pd.DataFrame(data) gb = df.groupby("groups", dropna=dropna) - result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))})) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))})) mi_tuples = tuple(zip(data["groups"], selected_data["values"])) mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None]) diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 773c1e60e97af..601e67bbca5e3 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -63,7 +63,9 @@ def func(group): assert hasattr(group, "testattr") return group.testattr - result = custom_df.groupby("c").apply(func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = custom_df.groupby("c").apply(func) expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c")) tm.assert_series_equal(result, expected) @@ -101,5 +103,7 @@ def test_groupby_resample_preserves_subclass(obj): df = df.set_index("Date") # Confirm groupby.resample() preserves dataframe type - result = df.groupby("Buyer").resample("5D").sum() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("Buyer").resample("5D").sum() assert isinstance(result, obj) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index e0793ada679c2..d05b60fd56b5f 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -224,7 +224,9 @@ def test_grouper_creation_bug(self): result = g.sum() tm.assert_frame_equal(result, expected) - result = g.apply(lambda x: x.sum()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = g.apply(lambda x: x.sum()) expected["A"] = [0, 2, 4] expected = expected.loc[:, ["A", "B"]] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 55f96bd1443de..1a26559ef4447 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -470,8 +470,12 @@ def test_timegrouper_apply_return_type_series(self): def sumfunc_series(x): return Series([x["value"].sum()], ("sum",)) - expected = df.groupby(Grouper(key="date")).apply(sumfunc_series) - result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_series) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby(Grouper(key="date")).apply(sumfunc_series) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_series) tm.assert_frame_equal( result.reset_index(drop=True), expected.reset_index(drop=True) ) @@ -487,8 +491,11 @@ def test_timegrouper_apply_return_type_value(self): def sumfunc_value(x): return x.value.sum() - expected = df.groupby(Grouper(key="date")).apply(sumfunc_value) - result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_value) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby(Grouper(key="date")).apply(sumfunc_value) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_value) tm.assert_series_equal( result.reset_index(drop=True), expected.reset_index(drop=True) ) @@ -895,7 +902,9 @@ def test_groupby_apply_timegrouper_with_nat_apply_squeeze( assert gb._selected_obj._get_axis(gb.axis).nlevels == 1 # function that returns a Series - res = gb.apply(lambda x: x["Quantity"] * 2) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = gb.apply(lambda x: x["Quantity"] * 2) expected = DataFrame( [[36, 6, 6, 10, 2]], diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index 7c50124e57e29..944dda8977882 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -327,9 +327,12 @@ def test_against_frame_and_seriesgroupby( ) if frame: # compare against apply with DataFrame value_counts - expected = gp.apply( - _frame_value_counts, ["gender", "education"], normalize, sort, ascending - ) + warn = FutureWarning if groupby == "column" else None + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(warn, match=msg): + expected = gp.apply( + _frame_value_counts, ["gender", "education"], normalize, sort, ascending + ) if as_index: tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 062dfe3931423..acb4b93ba1af3 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -636,7 +636,9 @@ def f(group): return group[:1] grouped = df.groupby("c") - result = grouped.apply(f) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.apply(f) assert result["d"].dtype == np.float64 @@ -790,7 +792,13 @@ def test_cython_transform_frame(request, op, args, targop, df_fix, gb_target): f = gb[["float", "float_missing"]].apply(targop) expected = concat([f, i], axis=1) else: - expected = gb.apply(targop) + if op != "shift" or not isinstance(gb_target.get("by"), (str, list)): + warn = None + else: + warn = FutureWarning + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(warn, match=msg): + expected = gb.apply(targop) expected = expected.sort_index(axis=1) if op == "shift": diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 66ecb93385a87..a955fa0b096f0 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1077,8 +1077,12 @@ def test_resample_segfault(unit): all_wins_and_wagers, columns=("ID", "timestamp", "A", "B") ).set_index("timestamp") df.index = df.index.as_unit(unit) - result = df.groupby("ID").resample("5min").sum() - expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum()) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("ID").resample("5min").sum() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = 
df.groupby("ID").apply(lambda x: x.resample("5min").sum()) tm.assert_frame_equal(result, expected) @@ -1097,7 +1101,9 @@ def test_resample_dtype_preservation(unit): result = df.resample("1D").ffill() assert result.val.dtype == np.int32 - result = df.groupby("group").resample("1D").ffill() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("group").resample("1D").ffill() assert result.val.dtype == np.int32 @@ -1823,8 +1829,12 @@ def f(data, add_arg): # Testing dataframe df = DataFrame({"A": 1, "B": 2}, index=date_range("2017", periods=10)) - result = df.groupby("A").resample("D").agg(f, multiplier).astype(float) - expected = df.groupby("A").resample("D").mean().multiply(multiplier) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").resample("D").agg(f, multiplier).astype(float) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("A").resample("D").mean().multiply(multiplier) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 1b20a7b99d1d7..f331851596317 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -77,7 +77,9 @@ def test_groupby_resample_api(): ) index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i], names=["group", "date"]) expected = DataFrame({"val": [5] * 7 + [6] + [7] * 7 + [8]}, index=index) - result = df.groupby("group").apply(lambda x: x.resample("1D").ffill())[["val"]] + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("group").apply(lambda x: x.resample("1D").ffill())[["val"]] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 6f4f1154907dc..d47a8132f26bb 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -68,8 +68,12 @@ def test_deferred_with_groupby(): def f_0(x): return x.set_index("date").resample("D").asfreq() - expected = df.groupby("id").apply(f_0) - result = df.set_index("date").groupby("id").resample("D").asfreq() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("id").apply(f_0) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.set_index("date").groupby("id").resample("D").asfreq() tm.assert_frame_equal(result, expected) df = DataFrame( @@ -83,8 +87,12 @@ def f_0(x): def f_1(x): return x.resample("1D").ffill() - expected = df.groupby("group").apply(f_1) - result = df.groupby("group").resample("1D").ffill() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("group").apply(f_1) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("group").resample("1D").ffill() tm.assert_frame_equal(result, expected) @@ -99,7 +107,9 @@ def test_getitem(test_frame): result = 
g.B.resample("2s").mean() tm.assert_series_equal(result, expected) - result = g.resample("2s").mean().B + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = g.resample("2s").mean().B tm.assert_series_equal(result, expected) @@ -230,8 +240,12 @@ def test_methods(f, test_frame): g = test_frame.groupby("A") r = g.resample("2s") - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.resample("2s"), f)()) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = getattr(r, f)() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.resample("2s"), f)()) tm.assert_equal(result, expected) @@ -248,8 +262,12 @@ def test_methods_nunique(test_frame): def test_methods_std_var(f, test_frame): g = test_frame.groupby("A") r = g.resample("2s") - result = getattr(r, f)(ddof=1) - expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1)) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = getattr(r, f)(ddof=1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1)) tm.assert_frame_equal(result, expected) @@ -258,18 +276,24 @@ def test_apply(test_frame): r = g.resample("2s") # reduction - expected = g.resample("2s").sum() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.resample("2s").sum() def f_0(x): return x.resample("2s").sum() - result = r.apply(f_0) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = r.apply(f_0) tm.assert_frame_equal(result, expected) def f_1(x): return x.resample("2s").apply(lambda y: y.sum()) - result = g.apply(f_1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = g.apply(f_1) # y.sum() results in int64 instead of int32 on 32-bit architectures expected = expected.astype("int64") tm.assert_frame_equal(result, expected) @@ -337,7 +361,9 @@ def test_resample_groupby_with_label(): # GH 13235 index = date_range("2000-01-01", freq="2D", periods=5) df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]}) - result = df.groupby("col0").resample("1W", label="left").sum() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("col0").resample("1W", label="left").sum() mi = [ np.array([0, 0, 1, 2], dtype=np.int64), @@ -357,7 +383,9 @@ def test_consistency_with_window(test_frame): # consistent return values with window df = test_frame expected = Index([1, 2, 3], name="A") - result = df.groupby("A").resample("2s").mean() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").resample("2s").mean() assert result.index.nlevels == 2 tm.assert_index_equal(result.index.levels[0], expected) @@ -455,7 +483,9 @@ def test_resample_groupby_agg_listlike(): def test_empty(keys): # GH 26411 df = 
DataFrame([], columns=["a", "b"], index=TimedeltaIndex([])) - result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() expected = ( DataFrame(columns=["a", "b"]) .set_index(keys, drop=False) @@ -478,7 +508,8 @@ def test_resample_groupby_agg_object_dtype_all_nan(consolidate): if consolidate: df = df._consolidate() - result = df.groupby(["key"]).resample("W", on="date").min() + with tm.assert_produces_warning(FutureWarning): + result = df.groupby(["key"]).resample("W", on="date").min() idx = pd.MultiIndex.from_arrays( [ ["A"] * 3 + ["B"] * 3, @@ -530,7 +561,9 @@ def test_resample_no_index(keys): df = DataFrame([], columns=["a", "b", "date"]) df["date"] = pd.to_datetime(df["date"]) df = df.set_index("date") - result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() expected = DataFrame(columns=["a", "b", "date"]).set_index(keys, drop=False) expected["date"] = pd.to_datetime(expected["date"]) expected = expected.set_index("date", append=True, drop=True) @@ -577,7 +610,9 @@ def test_groupby_resample_size_all_index_same(): {"A": [1] * 3 + [2] * 3 + [1] * 3 + [2] * 3, "B": np.arange(12)}, index=date_range("31/12/2000 18:00", freq="H", periods=12), ) - result = df.groupby("A").resample("D").size() + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").resample("D").size() expected = Series( 3, index=pd.MultiIndex.from_tuples( diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index d7fdbc4fe5f08..8b1eab552c97d 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -323,12 +323,14 @@ def test_groupby_resample_interpolate(): df["week_starting"] = date_range("01/01/2018", periods=3, freq="W") - result = ( - df.set_index("week_starting") - .groupby("volume") - .resample("1D") - .interpolate(method="linear") - ) + msg = "DataFrameGroupBy.resample operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ( + df.set_index("week_starting") + .groupby("volume") + .resample("1D") + .interpolate(method="linear") + ) expected_ind = pd.MultiIndex.from_tuples( [ diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index 46ab00c3e2284..b8e0173ee131f 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -99,7 +99,9 @@ def test_rolling(self, f, roll_frame): r = g.rolling(window=4) result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.rolling(4), f)()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.rolling(4), f)()) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -113,7 +115,9 @@ def test_rolling_ddof(self, f, roll_frame): r = g.rolling(window=4) result = getattr(r, f)(ddof=1) - expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1)) + msg = 
"DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1)) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -129,9 +133,11 @@ def test_rolling_quantile(self, interpolation, roll_frame): r = g.rolling(window=4) result = r.quantile(0.4, interpolation=interpolation) - expected = g.apply( - lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation) - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply( + lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation) + ) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -174,7 +180,9 @@ def test_rolling_corr_cov_other_diff_size_as_groups(self, f, roll_frame): def func(x): return getattr(x.rolling(4), f)(roll_frame) - expected = g.apply(func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(func) # GH 39591: The grouped column should be all np.nan # (groupby.apply inserts 0s for cov) expected["A"] = np.nan @@ -190,7 +198,9 @@ def test_rolling_corr_cov_pairwise(self, f, roll_frame): def func(x): return getattr(x.B.rolling(4), f)(pairwise=True) - expected = g.apply(func) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(func) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( @@ -235,7 +245,9 @@ def test_rolling_apply(self, raw, roll_frame): # reduction result = r.apply(lambda x: x.sum(), raw=raw) - expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -784,9 +796,13 @@ def test_groupby_rolling_resulting_multiindex3(self): def test_groupby_rolling_object_doesnt_affect_groupby_apply(self, roll_frame): # GH 39732 g = roll_frame.groupby("A", group_keys=False) - expected = g.apply(lambda x: x.rolling(4).sum()).index + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: x.rolling(4).sum()).index _ = g.rolling(window=4) - result = g.apply(lambda x: x.rolling(4).sum()).index + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = g.apply(lambda x: x.rolling(4).sum()).index tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -960,11 +976,13 @@ def test_groupby_monotonic(self): df["date"] = to_datetime(df["date"]) df = df.sort_values("date") - expected = ( - df.set_index("date") - .groupby("name") - .apply(lambda x: x.rolling("180D")["amount"].sum()) - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = ( + df.set_index("date") + .groupby("name") + .apply(lambda x: x.rolling("180D")["amount"].sum()) + ) result = df.groupby("name").rolling("180D", on="date")["amount"].sum() 
tm.assert_series_equal(result, expected) @@ -983,9 +1001,13 @@ def test_datelike_on_monotonic_within_each_group(self): } ) - expected = ( - df.set_index("B").groupby("A").apply(lambda x: x.rolling("4s")["C"].mean()) - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = ( + df.set_index("B") + .groupby("A") + .apply(lambda x: x.rolling("4s")["C"].mean()) + ) result = df.groupby("A").rolling("4s", on="B").C.mean() tm.assert_series_equal(result, expected) @@ -1015,7 +1037,9 @@ def test_expanding(self, f, frame): r = g.expanding() result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.expanding(), f)()) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.expanding(), f)()) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -1029,7 +1053,9 @@ def test_expanding_ddof(self, f, frame): r = g.expanding() result = getattr(r, f)(ddof=0) - expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -1045,9 +1071,11 @@ def test_expanding_quantile(self, interpolation, frame): r = g.expanding() result = r.quantile(0.4, interpolation=interpolation) - expected = g.apply( - lambda x: x.expanding().quantile(0.4, interpolation=interpolation) - ) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply( + lambda x: x.expanding().quantile(0.4, interpolation=interpolation) + ) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 @@ -1065,7 +1093,9 @@ def test_expanding_corr_cov(self, f, frame): def func_0(x): return getattr(x.expanding(), f)(frame) - expected = g.apply(func_0) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(func_0) # GH 39591: groupby.apply returns 1 instead of nan for windows # with all nan values null_idx = list(range(20, 61)) + list(range(72, 113)) @@ -1080,7 +1110,9 @@ def func_0(x): def func_1(x): return getattr(x.B.expanding(), f)(pairwise=True) - expected = g.apply(func_1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply(func_1) tm.assert_series_equal(result, expected) def test_expanding_apply(self, raw, frame): @@ -1089,7 +1121,11 @@ def test_expanding_apply(self, raw, frame): # reduction result = r.apply(lambda x: x.sum(), raw=raw) - expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.apply( + lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw) + ) # groupby.apply doesn't drop the grouped-by column expected = expected.drop("A", axis=1) # GH 39732 diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 47534226f972f..0931dd209ee05 100755 --- a/scripts/validate_unwanted_patterns.py +++ 
b/scripts/validate_unwanted_patterns.py @@ -33,6 +33,7 @@ "_agg_template_series", "_agg_template_frame", "_pipe_template", + "_apply_groupings_depr", "__main__", "_transform_template", "_use_inf_as_na", From c118953c380ed82b01e57caee4b8b58f993760f4 Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:00:41 +0200 Subject: [PATCH 34/93] Improve error message for StringDtype with invalid storage (#55052) --- pandas/core/arrays/string_.py | 3 ++- pandas/tests/arrays/string_/test_string_arrow.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index c90127c0e9812..693ebad0ca16f 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -123,7 +123,8 @@ def __init__(self, storage=None) -> None: storage = get_option("mode.string_storage") if storage not in {"python", "pyarrow", "pyarrow_numpy"}: raise ValueError( - f"Storage must be 'python' or 'pyarrow'. Got {storage} instead." + f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. " + f"Got {storage} instead." ) if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under7p0: raise ImportError( diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py index 09f9f788dc3e4..fb6c338b8f8ea 100644 --- a/pandas/tests/arrays/string_/test_string_arrow.py +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -255,3 +255,11 @@ def test_pickle_roundtrip(): result_sliced = pickle.loads(sliced_pickled) tm.assert_series_equal(result_sliced, expected_sliced) + + +@skip_if_no_pyarrow +def test_string_dtype_error_message(): + # GH#55051 + msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'." 
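    # Aside (annotation, not part of the upstream diff): the ``msg`` above is
    # used as a regex by the ``pytest.raises(match=...)`` check that follows;
    # the literal failure a user now sees is, for example:
    #     >>> StringDtype("bla")
    #     ValueError: Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'.
    #     Got bla instead.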
+ with pytest.raises(ValueError, match=msg): + StringDtype("bla") From ba1e73500756fab064b83e6421ae7f42dffe55f8 Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:05:08 +0200 Subject: [PATCH 35/93] Fix pickle roundtrip for new arrow string dtype (#55051) --- pandas/core/arrays/string_arrow.py | 5 ++++- pandas/tests/arrays/string_/test_string_arrow.py | 5 +++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index a6838fbc73be9..6262055827428 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -523,7 +523,10 @@ def _result_converter(cls, values, na=None): def __getattribute__(self, item): # ArrowStringArray and we both inherit from ArrowExtensionArray, which # creates inheritance problems (Diamond inheritance) - if item in ArrowStringArrayMixin.__dict__ and item != "_pa_array": + if item in ArrowStringArrayMixin.__dict__ and item not in ( + "_pa_array", + "__dict__", + ): return partial(getattr(ArrowStringArrayMixin, item), self) return super().__getattribute__(item) diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py index fb6c338b8f8ea..c1d424f12bfc4 100644 --- a/pandas/tests/arrays/string_/test_string_arrow.py +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -241,9 +241,10 @@ def test_setitem_invalid_indexer_raises(): @skip_if_no_pyarrow -def test_pickle_roundtrip(): +@pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"]) +def test_pickle_roundtrip(dtype): # GH 42600 - expected = pd.Series(range(10), dtype="string[pyarrow]") + expected = pd.Series(range(10), dtype=dtype) expected_sliced = expected.head(2) full_pickled = pickle.dumps(expected) sliced_pickled = pickle.dumps(expected_sliced) From b1e9b58340110b63dc74f5c5b5de4aaa8db184fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kai=20M=C3=BChlbauer?= Date: Thu, 7 Sep 2023 18:07:11 +0200 Subject: [PATCH 36/93] Fix docstring of Index.join in base.py (#55050) --- pandas/core/indexes/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6a397862712de..cd55997ad5f69 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4557,7 +4557,7 @@ def join( ------- join_index, (left_indexer, right_indexer) - Examples + Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx2 = pd.Index([4, 5, 6]) From a7cb22631dd607f8c36292118c69cbd7bed1485c Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Thu, 7 Sep 2023 12:28:45 -0400 Subject: [PATCH 37/93] BLD: Build wheels for Python 3.12 (#55010) * BLD: Build wheels for Python 3.12 * Update pyproject.toml * Update pyproject.toml * also circle * fix windows? * typo? * try single quotes * tyr to fix again * just use the base shared tag, no need to append windowsservercore * typo * update the other too * Update wheels.yml * try something * try something * debug * escape string? 
* go for green --- .circleci/config.yml | 5 ++--- .github/workflows/wheels.yml | 16 +++++++++------- pyproject.toml | 7 +++++-- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 50f6a116a6630..ba124533e953a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -48,7 +48,7 @@ jobs: name: Build aarch64 wheels no_output_timeout: 30m # Sometimes the tests won't generate any output, make sure the job doesn't get killed by that command: | - pip3 install cibuildwheel==2.14.1 + pip3 install cibuildwheel==2.15.0 cibuildwheel --prerelease-pythons --output-dir wheelhouse environment: CIBW_BUILD: << parameters.cibw-build >> @@ -92,5 +92,4 @@ workflows: only: /^v.*/ matrix: parameters: - # TODO: Enable Python 3.12 wheels when numpy releases a version that supports Python 3.12 - cibw-build: ["cp39-manylinux_aarch64", "cp310-manylinux_aarch64", "cp311-manylinux_aarch64"]#, "cp312-manylinux_aarch64"] + cibw-build: ["cp39-manylinux_aarch64", "cp310-manylinux_aarch64", "cp311-manylinux_aarch64", "cp312-manylinux_aarch64"] diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 5f541f1bae1fd..97d78a1a9afe3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -97,8 +97,7 @@ jobs: - [macos-12, macosx_*] - [windows-2022, win_amd64] # TODO: support PyPy? - # TODO: Enable Python 3.12 wheels when numpy releases a version that supports Python 3.12 - python: [["cp39", "3.9"], ["cp310", "3.10"], ["cp311", "3.11"]]#, ["cp312", "3.12"]] + python: [["cp39", "3.9"], ["cp310", "3.10"], ["cp311", "3.11"], ["cp312", "3.12"]] env: IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} @@ -150,8 +149,10 @@ jobs: uses: mamba-org/setup-micromamba@v1 with: environment-name: wheel-env + # Use a fixed Python, since we might have an unreleased Python not + # yet present on conda-forge create-args: >- - python=${{ matrix.python[1] }} + python=3.11 anaconda-client wheel cache-downloads: true @@ -167,12 +168,13 @@ jobs: shell: pwsh run: | $TST_CMD = @" - python -m pip install pytz six numpy python-dateutil tzdata>=2022.1 hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17; - python -m pip install --find-links=pandas\wheelhouse --no-index pandas; + python -m pip install hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17; + python -m pip install `$(Get-Item pandas\wheelhouse\*.whl); python -c `'import pandas as pd; pd.test(extra_args=[\"`\"--no-strict-data-files`\"\", \"`\"-m not clipboard and not single_cpu and not slow and not network and not db`\"\"])`'; "@ - docker pull python:${{ matrix.python[1] }}-windowsservercore - docker run --env PANDAS_CI='1' -v ${PWD}:C:\pandas python:${{ matrix.python[1] }}-windowsservercore powershell -Command $TST_CMD + # add rc to the end of the image name if the Python version is unreleased + docker pull python:${{ matrix.python[1] == '3.12' && '3.12-rc' || format('{0}-windowsservercore', matrix.python[1]) }} + docker run --env PANDAS_CI='1' -v ${PWD}:C:\pandas python:${{ matrix.python[1] == '3.12' && '3.12-rc' || format('{0}-windowsservercore', matrix.python[1]) }} powershell -Command $TST_CMD - uses: actions/upload-artifact@v3 with: diff --git a/pyproject.toml b/pyproject.toml index 845c2a63e84f0..74d6aaee286a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,8 @@ requires = [ # we don't 
want to force users to compile with 1.25 though # (Ideally, in the future, though, oldest-supported-numpy can be dropped when our min numpy is 1.25.x) "oldest-supported-numpy>=2022.8.16; python_version<'3.12'", - "numpy>=1.22.4; python_version>='3.12'", + # TODO: This needs to be updated when the official numpy 1.26 comes out + "numpy>=1.26.0b1; python_version>='3.12'", "versioneer[toml]" ] @@ -30,7 +31,9 @@ license = {file = 'LICENSE'} requires-python = '>=3.9' dependencies = [ "numpy>=1.22.4; python_version<'3.11'", - "numpy>=1.23.2; python_version>='3.11'", + "numpy>=1.23.2; python_version=='3.11'", + # TODO: This needs to be updated when the official numpy 1.26 comes out + "numpy>=1.26.0b1; python_version>='3.12'", "python-dateutil>=2.8.2", "pytz>=2020.1", "tzdata>=2022.1" From cea0cc0a54725ed234e2f51cc21a1182674a6032 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Thu, 7 Sep 2023 12:30:50 -0400 Subject: [PATCH 38/93] Convert test_sql to pytest idiom (#54936) * Convert test_sql to pytest idiom * Try KeyError catch * Added drop_view to existing test method * xfail MySQL issue --- pandas/io/sql.py | 2 +- pandas/tests/io/test_sql.py | 1008 ++++++++++++++++++++++------------- 2 files changed, 630 insertions(+), 380 deletions(-) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 2b139f8ca527c..0788d9da06eb9 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -138,7 +138,7 @@ def _parse_date_columns(data_frame, parse_dates): if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] - except TypeError: + except (KeyError, TypeError): fmt = None data_frame.isetitem(i, _handle_date_column(df_col, format=fmt)) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index bfa93a4ff910e..bbdb22955297e 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -413,6 +413,8 @@ def mysql_pymysql_engine(iris_path, types_data): for entry in types_data: entry.pop("DateColWithTz") create_and_load_types(engine, types_data, "mysql") + if not insp.has_table("iris_view"): + create_and_load_iris_view(engine) yield engine with engine.connect() as conn: with conn.begin(): @@ -422,7 +424,7 @@ def mysql_pymysql_engine(iris_path, types_data): @pytest.fixture -def mysql_pymysql_conn(mysql_pymysql_engine): +def mysql_pymysql_conn(iris_path, mysql_pymysql_engine): with mysql_pymysql_engine.connect() as conn: yield conn @@ -440,6 +442,8 @@ def postgresql_psycopg2_engine(iris_path, types_data): create_and_load_iris(engine, iris_path, "postgresql") if not insp.has_table("types"): create_and_load_types(engine, types_data, "postgresql") + if not insp.has_table("iris_view"): + create_and_load_iris_view(engine) yield engine with engine.connect() as conn: with conn.begin(): @@ -462,9 +466,20 @@ def sqlite_str(): @pytest.fixture -def sqlite_engine(sqlite_str): +def sqlite_engine(sqlite_str, iris_path, types_data): sqlalchemy = pytest.importorskip("sqlalchemy") engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool) + + insp = sqlalchemy.inspect(engine) + if not insp.has_table("iris"): + create_and_load_iris(engine, iris_path, "sqlite") + if not insp.has_table("iris_view"): + create_and_load_iris_view(engine) + if not insp.has_table("types"): + for entry in types_data: + entry.pop("DateColWithTz") + create_and_load_types(engine, types_data, "sqlite") + yield engine engine.dispose() @@ -476,17 +491,25 @@ def sqlite_conn(sqlite_engine): @pytest.fixture -def sqlite_iris_str(sqlite_str, iris_path): +def 
sqlite_iris_str(sqlite_str, iris_path, types_data): sqlalchemy = pytest.importorskip("sqlalchemy") engine = sqlalchemy.create_engine(sqlite_str) - create_and_load_iris(engine, iris_path, "sqlite") + + insp = sqlalchemy.inspect(engine) + if not insp.has_table("iris"): + create_and_load_iris(engine, iris_path, "sqlite") + if not insp.has_table("iris_view"): + create_and_load_iris_view(engine) + if not insp.has_table("types"): + for entry in types_data: + entry.pop("DateColWithTz") + create_and_load_types(engine, types_data, "sqlite") engine.dispose() return sqlite_str @pytest.fixture def sqlite_iris_engine(sqlite_engine, iris_path): - create_and_load_iris(sqlite_engine, iris_path, "sqlite") return sqlite_engine @@ -499,6 +522,7 @@ def sqlite_iris_conn(sqlite_iris_engine): @pytest.fixture def sqlite_buildin(): with contextlib.closing(sqlite3.connect(":memory:")) as closing_conn: + create_and_load_iris_view(closing_conn) with closing_conn as conn: yield conn @@ -1097,6 +1121,7 @@ class PandasSQLTest: """ def load_iris_data(self, iris_path): + self.drop_view("iris_view", self.conn) self.drop_table("iris", self.conn) if isinstance(self.conn, sqlite3.Connection): create_and_load_iris_sqlite3(self.conn, iris_path) @@ -1221,470 +1246,695 @@ class DummyException(Exception): # -- Testing the public API -class _TestSQLApi(PandasSQLTest): - """ - Base class to test the public API. +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_view(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_query("SELECT * FROM iris_view", conn) + check_iris_frame(iris_frame) - From this two classes are derived to run these tests for both the - sqlalchemy mode (`TestSQLApi`) and the fallback mode - (`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific - tests for the different sql flavours are included in `_TestSQLAlchemy`. - Notes: - flavor can always be passed even in SQLAlchemy mode, - should be correctly ignored. 
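# Annotation (not part of the upstream diff): the conversion pattern used
# throughout this commit replaces class-based tests that read ``self.conn``
# with module-level functions parametrized over fixture *names*, resolved
# lazily so each backend's fixture is only built when requested. A minimal
# sketch of the idiom (``all_connectable`` is the fixture-name list defined
# earlier in this module; the test body here is illustrative only):
#
#     @pytest.mark.db
#     @pytest.mark.parametrize("conn", all_connectable)
#     def test_example(conn, request):
#         conn = request.getfixturevalue(conn)  # fixture name -> live connection
#         result = sql.read_sql_query("SELECT 1 AS one", conn)
#         assert result.iloc[0, 0] == 1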
+@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_with_chunksize_no_result(conn, request): + conn = request.getfixturevalue(conn) + query = 'SELECT * FROM iris_view WHERE "SepalLength" < 0.0' + with_batch = sql.read_sql_query(query, conn, chunksize=5) + without_batch = sql.read_sql_query(query, conn) + tm.assert_frame_equal(concat(with_batch), without_batch) - we don't use drop_table because that isn't part of the public api - """ +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame1", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame1") - flavor = "sqlite" - mode: str + sql.to_sql(test_frame1, "test_frame1", conn) + assert sql.has_table("test_frame1", conn) - @pytest.fixture(autouse=True) - def setup_method(self, iris_path, types_data): - self.conn = self.connect() - self.load_iris_data(iris_path) - self.load_types_data(types_data) - self.load_test_data_and_sql() - def load_test_data_and_sql(self): - create_and_load_iris_view(self.conn) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_fail(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame2", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame2") - def test_read_sql_view(self): - iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn) - check_iris_frame(iris_frame) + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") + assert sql.has_table("test_frame2", conn) - def test_read_sql_with_chunksize_no_result(self): - query = "SELECT * FROM iris_view WHERE SepalLength < 0.0" - with_batch = sql.read_sql_query(query, self.conn, chunksize=5) - without_batch = sql.read_sql_query(query, self.conn) - tm.assert_frame_equal(concat(with_batch), without_batch) + msg = "Table 'test_frame2' already exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") - def test_to_sql(self, test_frame1): - sql.to_sql(test_frame1, "test_frame1", self.conn) - assert sql.has_table("test_frame1", self.conn) - def test_to_sql_fail(self, test_frame1): - sql.to_sql(test_frame1, "test_frame2", self.conn, if_exists="fail") - assert sql.has_table("test_frame2", self.conn) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_replace(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame3", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame3") - msg = "Table 'test_frame2' already exists" - with pytest.raises(ValueError, match=msg): - sql.to_sql(test_frame1, "test_frame2", self.conn, if_exists="fail") + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="fail") + # Add to table again + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="replace") + assert sql.has_table("test_frame3", conn) - def test_to_sql_replace(self, test_frame1): - sql.to_sql(test_frame1, "test_frame3", self.conn, if_exists="fail") - # Add to table again - sql.to_sql(test_frame1, "test_frame3", self.conn, if_exists="replace") - assert sql.has_table("test_frame3", self.conn) + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame3") - num_entries = len(test_frame1) - num_rows = 
count_rows(self.conn, "test_frame3") + assert num_rows == num_entries - assert num_rows == num_entries - def test_to_sql_append(self, test_frame1): - assert sql.to_sql(test_frame1, "test_frame4", self.conn, if_exists="fail") == 4 +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_append(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame4", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame4") - # Add to table again - assert ( - sql.to_sql(test_frame1, "test_frame4", self.conn, if_exists="append") == 4 - ) - assert sql.has_table("test_frame4", self.conn) + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="fail") == 4 - num_entries = 2 * len(test_frame1) - num_rows = count_rows(self.conn, "test_frame4") + # Add to table again + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="append") == 4 + assert sql.has_table("test_frame4", conn) - assert num_rows == num_entries + num_entries = 2 * len(test_frame1) + num_rows = count_rows(conn, "test_frame4") - def test_to_sql_type_mapping(self, test_frame3): - sql.to_sql(test_frame3, "test_frame5", self.conn, index=False) - result = sql.read_sql("SELECT * FROM test_frame5", self.conn) + assert num_rows == num_entries - tm.assert_frame_equal(test_frame3, result) - def test_to_sql_series(self): - s = Series(np.arange(5, dtype="int64"), name="series") - sql.to_sql(s, "test_series", self.conn, index=False) - s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn) - tm.assert_frame_equal(s.to_frame(), s2) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_type_mapping(conn, request, test_frame3): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame5", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame5") + + sql.to_sql(test_frame3, "test_frame5", conn, index=False) + result = sql.read_sql("SELECT * FROM test_frame5", conn) - def test_roundtrip(self, test_frame1): - sql.to_sql(test_frame1, "test_frame_roundtrip", con=self.conn) - result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn) + tm.assert_frame_equal(test_frame3, result) - # HACK! 
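    # (Annotation, not part of the upstream diff: the "hack" is that to_sql
    # wrote the original index out as a "level_0" column, so the roundtrip
    # result must have its index rebuilt below before it can compare equal
    # to the original frame.)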
- result.index = test_frame1.index - result.set_index("level_0", inplace=True) - result.index.astype(int) - result.index.name = None - tm.assert_frame_equal(result, test_frame1) - def test_roundtrip_chunksize(self, test_frame1): - sql.to_sql( - test_frame1, - "test_frame_roundtrip", - con=self.conn, - index=False, - chunksize=2, - ) - result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn) - tm.assert_frame_equal(result, test_frame1) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_series(conn, request): + conn = request.getfixturevalue(conn) + if sql.has_table("test_series", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_series") - def test_execute_sql(self): - # drop_sql = "DROP TABLE IF EXISTS test" # should already be done - with sql.pandasSQL_builder(self.conn) as pandas_sql: - iris_results = pandas_sql.execute("SELECT * FROM iris") + s = Series(np.arange(5, dtype="int64"), name="series") + sql.to_sql(s, "test_series", conn, index=False) + s2 = sql.read_sql_query("SELECT * FROM test_series", conn) + tm.assert_frame_equal(s.to_frame(), s2) + + +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql(test_frame1, "test_frame_roundtrip", con=conn) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + + # HACK! + result.index = test_frame1.index + result.set_index("level_0", inplace=True) + result.index.astype(int) + result.index.name = None + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip_chunksize(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql( + test_frame1, + "test_frame_roundtrip", + con=conn, + index=False, + chunksize=2, + ) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_execute_sql(conn, request): + # drop_sql = "DROP TABLE IF EXISTS test" # should already be done + conn = request.getfixturevalue(conn) + with sql.pandasSQL_builder(conn) as pandas_sql: + iris_results = pandas_sql.execute("SELECT * FROM iris") row = iris_results.fetchone() - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) + tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) - def test_date_parsing(self): - # Test date parsing in read_sql - # No Parsing - df = sql.read_sql_query("SELECT * FROM types", self.conn) + +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_date_parsing(conn, request): + conn_name = conn + if conn_name in {"sqlite_buildin", "sqlite_str"}: + pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture") + + conn = request.getfixturevalue(conn) + # Test date parsing in read_sql + # No Parsing + df = sql.read_sql_query("SELECT * FROM types", conn) + if not ("mysql" in conn_name or "postgres" in conn_name): assert not issubclass(df.DateCol.dtype.type, 
np.datetime64) - df = sql.read_sql_query( - "SELECT * FROM types", self.conn, parse_dates=["DateCol"] - ) - assert issubclass(df.DateCol.dtype.type, np.datetime64) - assert df.DateCol.tolist() == [ - Timestamp(2000, 1, 3, 0, 0, 0), - Timestamp(2000, 1, 4, 0, 0, 0), - ] + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] - df = sql.read_sql_query( - "SELECT * FROM types", - self.conn, - parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}, - ) - assert issubclass(df.DateCol.dtype.type, np.datetime64) - assert df.DateCol.tolist() == [ - Timestamp(2000, 1, 3, 0, 0, 0), - Timestamp(2000, 1, 4, 0, 0, 0), - ] + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] - df = sql.read_sql_query( - "SELECT * FROM types", self.conn, parse_dates=["IntDateCol"] - ) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) - assert df.IntDateCol.tolist() == [ - Timestamp(1986, 12, 25, 0, 0, 0), - Timestamp(2013, 1, 1, 0, 0, 0), - ] + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", conn, parse_dates={"IntDateCol": "s"} + ) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"IntDateOnlyCol": "%Y%m%d"}, + ) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + Timestamp("2010-10-10"), + Timestamp("2010-12-12"), + ] - df = sql.read_sql_query( - "SELECT * FROM types", self.conn, parse_dates={"IntDateCol": "s"} - ) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) - assert df.IntDateCol.tolist() == [ - Timestamp(1986, 12, 25, 0, 0, 0), - Timestamp(2013, 1, 1, 0, 0, 0), - ] - df = sql.read_sql_query( +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("error", ["ignore", "raise", "coerce"]) +@pytest.mark.parametrize( + "read_sql, text, mode", + [ + (sql.read_sql, "SELECT * FROM types", ("sqlalchemy", "fallback")), + (sql.read_sql, "types", ("sqlalchemy")), + ( + sql.read_sql_query, "SELECT * FROM types", - self.conn, - parse_dates={"IntDateOnlyCol": "%Y%m%d"}, - ) - assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) - assert df.IntDateOnlyCol.tolist() == [ - Timestamp("2010-10-10"), - Timestamp("2010-12-12"), - ] + ("sqlalchemy", "fallback"), + ), + (sql.read_sql_table, "types", ("sqlalchemy")), + ], +) +def test_api_custom_dateparsing_error( + conn, request, read_sql, text, mode, error, types_data_frame +): + conn_name = conn + if conn_name in {"sqlite_buildin", "sqlite_str"}: + pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture") - @pytest.mark.parametrize("error", ["ignore", "raise", "coerce"]) - @pytest.mark.parametrize( - "read_sql, text, mode", - [ - (sql.read_sql, "SELECT * FROM types", ("sqlalchemy", "fallback")), 
-            (sql.read_sql, "types", ("sqlalchemy")),
-            (
-                sql.read_sql_query,
-                "SELECT * FROM types",
-                ("sqlalchemy", "fallback"),
-            ),
-            (sql.read_sql_table, "types", ("sqlalchemy")),
-        ],
+    conn = request.getfixturevalue(conn)
+
+    expected = types_data_frame.astype({"DateCol": "datetime64[ns]"})
+
+    result = read_sql(
+        text,
+        con=conn,
+        parse_dates={
+            "DateCol": {"errors": error},
+        },
     )
-    def test_custom_dateparsing_error(
-        self, read_sql, text, mode, error, types_data_frame
-    ):
-        if self.mode in mode:
-            expected = types_data_frame.astype({"DateCol": "datetime64[ns]"})
+    if "postgres" in conn_name:
+        # TODO: clean up types_data_frame fixture
+        result = result.drop(columns=["DateColWithTz"])
+        result["BoolCol"] = result["BoolCol"].astype(int)
+        result["BoolColWithNull"] = result["BoolColWithNull"].astype(float)
 
-            result = read_sql(
-                text,
-                con=self.conn,
-                parse_dates={
-                    "DateCol": {"errors": error},
-                },
-            )
+    tm.assert_frame_equal(result, expected)
 
-            tm.assert_frame_equal(result, expected)
 
-    def test_date_and_index(self):
-        # Test case where same column appears in parse_date and index_col
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_date_and_index(conn, request):
+    # Test case where same column appears in parse_date and index_col
+    conn_name = conn
+    if conn_name in {"sqlite_buildin", "sqlite_str"}:
+        pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture")
 
-        df = sql.read_sql_query(
-            "SELECT * FROM types",
-            self.conn,
-            index_col="DateCol",
-            parse_dates=["DateCol", "IntDateCol"],
-        )
+    conn = request.getfixturevalue(conn)
+    df = sql.read_sql_query(
+        "SELECT * FROM types",
+        conn,
+        index_col="DateCol",
+        parse_dates=["DateCol", "IntDateCol"],
+    )
 
-        assert issubclass(df.index.dtype.type, np.datetime64)
-        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
+    assert issubclass(df.index.dtype.type, np.datetime64)
+    assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
 
-    def test_timedelta(self):
-        # see #6921
-        df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
-        with tm.assert_produces_warning(UserWarning):
-            result_count = df.to_sql(name="test_timedelta", con=self.conn)
-        assert result_count == 2
-        result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
-        tm.assert_series_equal(result["foo"], df["foo"].view("int64"))
-
-    def test_complex_raises(self):
-        df = DataFrame({"a": [1 + 1j, 2j]})
-        msg = "Complex datatypes not supported"
-        with pytest.raises(ValueError, match=msg):
-            assert df.to_sql("test_complex", con=self.conn) is None
 
-    @pytest.mark.parametrize(
-        "index_name,index_label,expected",
-        [
-            # no index name, defaults to 'index'
-            (None, None, "index"),
-            # specifying index_label
-            (None, "other_label", "other_label"),
-            # using the index name
-            ("index_name", None, "index_name"),
-            # has index name, but specifying index_label
-            ("index_name", "other_label", "other_label"),
-            # index name is integer
-            (0, None, "0"),
-            # index name is None but index label is integer
-            (None, 0, "0"),
-        ],
-    )
-    def test_to_sql_index_label(self, index_name, index_label, expected):
-        temp_frame = DataFrame({"col1": range(4)})
-        temp_frame.index.name = index_name
-        query = "SELECT * FROM test_index_label"
-        sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
-        frame = sql.read_sql_query(query, self.conn)
-        assert frame.columns[0] == expected
-
-    def test_to_sql_index_label_multiindex(self):
-        expected_row_count = 4
-        temp_frame = DataFrame(
-            {"col1": range(4)},
-            index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
-        )
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_timedelta(conn, request):
+    # see #6921
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_timedelta", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_timedelta")
 
-        # no index name, defaults to 'level_0' and 'level_1'
-        result = sql.to_sql(temp_frame, "test_index_label", self.conn)
-        assert result == expected_row_count
-        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
-        assert frame.columns[0] == "level_0"
-        assert frame.columns[1] == "level_1"
+    df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
+    with tm.assert_produces_warning(UserWarning):
+        result_count = df.to_sql(name="test_timedelta", con=conn)
+    assert result_count == 2
+    result = sql.read_sql_query("SELECT * FROM test_timedelta", conn)
+    tm.assert_series_equal(result["foo"], df["foo"].view("int64"))
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_complex_raises(conn, request):
+    conn = request.getfixturevalue(conn)
+    df = DataFrame({"a": [1 + 1j, 2j]})
+    msg = "Complex datatypes not supported"
+    with pytest.raises(ValueError, match=msg):
+        assert df.to_sql("test_complex", con=conn) is None
 
-        # specifying index_label
-        result = sql.to_sql(
-            temp_frame,
-            "test_index_label",
-            self.conn,
-            if_exists="replace",
-            index_label=["A", "B"],
-        )
-        assert result == expected_row_count
-        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
-        assert frame.columns[:2].tolist() == ["A", "B"]
 
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+@pytest.mark.parametrize(
+    "index_name,index_label,expected",
+    [
+        # no index name, defaults to 'index'
+        (None, None, "index"),
+        # specifying index_label
+        (None, "other_label", "other_label"),
         # using the index name
-        temp_frame.index.names = ["A", "B"]
-        result = sql.to_sql(
-            temp_frame, "test_index_label", self.conn, if_exists="replace"
+        ("index_name", None, "index_name"),
+        # has index name, but specifying index_label
+        ("index_name", "other_label", "other_label"),
+        # index name is integer
+        (0, None, "0"),
+        # index name is None but index label is integer
+        (None, 0, "0"),
+    ],
+)
+def test_api_to_sql_index_label(conn, request, index_name, index_label, expected):
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_index_label", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_index_label")
+
+    temp_frame = DataFrame({"col1": range(4)})
+    temp_frame.index.name = index_name
+    query = "SELECT * FROM test_index_label"
+    sql.to_sql(temp_frame, "test_index_label", conn, index_label=index_label)
+    frame = sql.read_sql_query(query, conn)
+    assert frame.columns[0] == expected
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_to_sql_index_label_multiindex(conn, request):
+    conn_name = conn
+    if "mysql" in conn_name:
+        request.node.add_marker(
+            pytest.mark.xfail(reason="MySQL can fail using TEXT without length as key")
         )
-        assert result == expected_row_count
-        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
-        assert frame.columns[:2].tolist() == ["A", "B"]
 
-        # has index name, but specifying index_label
-        result = sql.to_sql(
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_index_label", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_index_label")
+
+    expected_row_count = 4
+    temp_frame = DataFrame(
+        {"col1": range(4)},
+        index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
+    )
+
+    # no index name, defaults to 'level_0' and 'level_1'
+    result = sql.to_sql(temp_frame, "test_index_label", conn)
+    assert result == expected_row_count
+    frame = sql.read_sql_query("SELECT * FROM test_index_label", conn)
+    assert frame.columns[0] == "level_0"
+    assert frame.columns[1] == "level_1"
+
+    # specifying index_label
+    result = sql.to_sql(
+        temp_frame,
+        "test_index_label",
+        conn,
+        if_exists="replace",
+        index_label=["A", "B"],
+    )
+    assert result == expected_row_count
+    frame = sql.read_sql_query("SELECT * FROM test_index_label", conn)
+    assert frame.columns[:2].tolist() == ["A", "B"]
+
+    # using the index name
+    temp_frame.index.names = ["A", "B"]
+    result = sql.to_sql(temp_frame, "test_index_label", conn, if_exists="replace")
+    assert result == expected_row_count
+    frame = sql.read_sql_query("SELECT * FROM test_index_label", conn)
+    assert frame.columns[:2].tolist() == ["A", "B"]
+
+    # has index name, but specifying index_label
+    result = sql.to_sql(
+        temp_frame,
+        "test_index_label",
+        conn,
+        if_exists="replace",
+        index_label=["C", "D"],
+    )
+    assert result == expected_row_count
+    frame = sql.read_sql_query("SELECT * FROM test_index_label", conn)
+    assert frame.columns[:2].tolist() == ["C", "D"]
+
+    msg = "Length of 'index_label' should match number of levels, which is 2"
+    with pytest.raises(ValueError, match=msg):
+        sql.to_sql(
             temp_frame,
             "test_index_label",
-            self.conn,
+            conn,
             if_exists="replace",
-            index_label=["C", "D"],
+            index_label="C",
         )
-        assert result == expected_row_count
-        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
-        assert frame.columns[:2].tolist() == ["C", "D"]
-
-        msg = "Length of 'index_label' should match number of levels, which is 2"
-        with pytest.raises(ValueError, match=msg):
-            sql.to_sql(
-                temp_frame,
-                "test_index_label",
-                self.conn,
-                if_exists="replace",
-                index_label="C",
-            )
 
-    def test_multiindex_roundtrip(self):
-        df = DataFrame.from_records(
-            [(1, 2.1, "line1"), (2, 1.5, "line2")],
-            columns=["A", "B", "C"],
-            index=["A", "B"],
-        )
-        df.to_sql(name="test_multiindex_roundtrip", con=self.conn)
-        result = sql.read_sql_query(
-            "SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
-        )
-        tm.assert_frame_equal(df, result, check_index_type=True)
 
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_multiindex_roundtrip(conn, request):
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_multiindex_roundtrip", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_multiindex_roundtrip")
+
+    df = DataFrame.from_records(
+        [(1, 2.1, "line1"), (2, 1.5, "line2")],
+        columns=["A", "B", "C"],
+        index=["A", "B"],
+    )
 
-    @pytest.mark.parametrize(
-        "dtype",
-        [
-            None,
-            int,
-            float,
-            {"A": int, "B": float},
-        ],
+    df.to_sql(name="test_multiindex_roundtrip", con=conn)
+    result = sql.read_sql_query(
+        "SELECT * FROM test_multiindex_roundtrip", conn, index_col=["A", "B"]
     )
-    def test_dtype_argument(self, dtype):
-        # GH10285 Add dtype argument to read_sql_query
-        df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"])
-        assert df.to_sql(name="test_dtype_argument", con=self.conn) == 2
-
-        expected = df.astype(dtype)
-        result = sql.read_sql_query(
-            "SELECT A, B FROM test_dtype_argument", con=self.conn, dtype=dtype
-        )
+    tm.assert_frame_equal(df, result, check_index_type=True)
 
-        tm.assert_frame_equal(result, expected)
 
-    def test_integer_col_names(self):
-        df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
-        sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+@pytest.mark.parametrize(
+    "dtype",
+    [
+        None,
+        int,
+        float,
+        {"A": int, "B": float},
+    ],
+)
+def test_api_dtype_argument(conn, request, dtype):
+    # GH10285 Add dtype argument to read_sql_query
+    conn_name = conn
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_dtype_argument", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_dtype_argument")
 
-    def test_get_schema(self, test_frame1):
-        create_sql = sql.get_schema(test_frame1, "test", con=self.conn)
-        assert "CREATE" in create_sql
+    df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"])
+    assert df.to_sql(name="test_dtype_argument", con=conn) == 2
 
-    def test_get_schema_with_schema(self, test_frame1):
-        # GH28486
-        create_sql = sql.get_schema(test_frame1, "test", con=self.conn, schema="pypi")
-        assert "CREATE TABLE pypi." in create_sql
+    expected = df.astype(dtype)
 
-    def test_get_schema_dtypes(self):
-        if self.mode == "sqlalchemy":
-            from sqlalchemy import Integer
+    if "postgres" in conn_name:
+        query = 'SELECT "A", "B" FROM test_dtype_argument'
+    else:
+        query = "SELECT A, B FROM test_dtype_argument"
+    result = sql.read_sql_query(query, con=conn, dtype=dtype)
 
-            dtype = Integer
-        else:
-            dtype = "INTEGER"
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_integer_col_names(conn, request):
+    conn = request.getfixturevalue(conn)
+    df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
+    sql.to_sql(df, "test_frame_integer_col_names", conn, if_exists="replace")
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_get_schema(conn, request, test_frame1):
+    conn = request.getfixturevalue(conn)
+    create_sql = sql.get_schema(test_frame1, "test", con=conn)
+    assert "CREATE" in create_sql
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_get_schema_with_schema(conn, request, test_frame1):
+    # GH28486
+    conn = request.getfixturevalue(conn)
+    create_sql = sql.get_schema(test_frame1, "test", con=conn, schema="pypi")
+    assert "CREATE TABLE pypi." in create_sql
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_get_schema_dtypes(conn, request):
+    conn_name = conn
+    conn = request.getfixturevalue(conn)
+    float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
+
+    if conn_name == "sqlite_buildin":
+        dtype = "INTEGER"
+    else:
+        from sqlalchemy import Integer
+
+        dtype = Integer
+    create_sql = sql.get_schema(float_frame, "test", con=conn, dtype={"b": dtype})
+    assert "CREATE" in create_sql
+    assert "INTEGER" in create_sql
 
-        float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
-        create_sql = sql.get_schema(
-            float_frame, "test", con=self.conn, dtype={"b": dtype}
-        )
-        assert "CREATE" in create_sql
-        assert "INTEGER" in create_sql
 
-    def test_get_schema_keys(self, test_frame1):
-        frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
-        create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_get_schema_keys(conn, request, test_frame1):
+    conn_name = conn
+    conn = request.getfixturevalue(conn)
+    frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
+    create_sql = sql.get_schema(frame, "test", con=conn, keys="Col1")
+
+    if "mysql" in conn_name:
+        constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`Col1`)"
+    else:
         constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
-        assert constraint_sentence in create_sql
+    assert constraint_sentence in create_sql
 
-        # multiple columns as key (GH10385)
-        create_sql = sql.get_schema(test_frame1, "test", con=self.conn, keys=["A", "B"])
+    # multiple columns as key (GH10385)
+    create_sql = sql.get_schema(test_frame1, "test", con=conn, keys=["A", "B"])
+    if "mysql" in conn_name:
+        constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`A`, `B`)"
+    else:
         constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
-        assert constraint_sentence in create_sql
+    assert constraint_sentence in create_sql
 
-    def test_chunksize_read(self):
-        df = DataFrame(
-            np.random.default_rng(2).standard_normal((22, 5)), columns=list("abcde")
-        )
-        df.to_sql(name="test_chunksize", con=self.conn, index=False)
 
-        # reading the query in one time
-        res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_chunksize_read(conn, request):
+    conn_name = conn
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_chunksize", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_chunksize")
+
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((22, 5)), columns=list("abcde")
+    )
+    df.to_sql(name="test_chunksize", con=conn, index=False)
+
+    # reading the query in one time
+    res1 = sql.read_sql_query("select * from test_chunksize", conn)
+
+    # reading the query in chunks with read_sql_query
+    res2 = DataFrame()
+    i = 0
+    sizes = [5, 5, 5, 5, 2]
+
+    for chunk in sql.read_sql_query("select * from test_chunksize", conn, chunksize=5):
+        res2 = concat([res2, chunk], ignore_index=True)
+        assert len(chunk) == sizes[i]
+        i += 1
 
-        # reading the query in chunks with read_sql_query
-        res2 = DataFrame()
+    tm.assert_frame_equal(res1, res2)
+
+    # reading the table in chunks with read_sql_table
+    if conn_name == "sqlite_buildin":
+        with pytest.raises(NotImplementedError, match=""):
+            sql.read_sql_table("test_chunksize", conn, chunksize=5)
+    else:
+        res3 = DataFrame()
         i = 0
         sizes = [5, 5, 5, 5, 2]
 
-        for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5 - ): - res2 = concat([res2, chunk], ignore_index=True) + for chunk in sql.read_sql_table("test_chunksize", conn, chunksize=5): + res3 = concat([res3, chunk], ignore_index=True) assert len(chunk) == sizes[i] i += 1 - tm.assert_frame_equal(res1, res2) + tm.assert_frame_equal(res1, res3) - # reading the query in chunks with read_sql_query - if self.mode == "sqlalchemy": - res3 = DataFrame() - i = 0 - sizes = [5, 5, 5, 5, 2] - for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5): - res3 = concat([res3, chunk], ignore_index=True) - assert len(chunk) == sizes[i] - i += 1 +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_categorical(conn, request): + # GH8624 + # test that categorical gets written correctly as dense column + conn = request.getfixturevalue(conn) + if sql.has_table("test_categorical", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_categorical") - tm.assert_frame_equal(res1, res3) + df = DataFrame( + { + "person_id": [1, 2, 3], + "person_name": ["John P. Doe", "Jane Dove", "John P. Doe"], + } + ) + df2 = df.copy() + df2["person_name"] = df2["person_name"].astype("category") - def test_categorical(self): - # GH8624 - # test that categorical gets written correctly as dense column - df = DataFrame( - { - "person_id": [1, 2, 3], - "person_name": ["John P. Doe", "Jane Dove", "John P. Doe"], - } - ) - df2 = df.copy() - df2["person_name"] = df2["person_name"].astype("category") + df2.to_sql(name="test_categorical", con=conn, index=False) + res = sql.read_sql_query("SELECT * FROM test_categorical", conn) - df2.to_sql(name="test_categorical", con=self.conn, index=False) - res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn) + tm.assert_frame_equal(res, df) - tm.assert_frame_equal(res, df) - def test_unicode_column_name(self): - # GH 11431 - df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"]) - df.to_sql(name="test_unicode", con=self.conn, index=False) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_unicode_column_name(conn, request): + # GH 11431 + conn = request.getfixturevalue(conn) + if sql.has_table("test_unicode", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_unicode") - def test_escaped_table_name(self): - # GH 13206 - df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) - df.to_sql(name="d1187b08-4943-4c8d-a7f6", con=self.conn, index=False) + df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"]) + df.to_sql(name="test_unicode", con=conn, index=False) - res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn) - tm.assert_frame_equal(res, df) +@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_api_escaped_table_name(conn, request): + # GH 13206 + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("d1187b08-4943-4c8d-a7f6", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("d1187b08-4943-4c8d-a7f6") - def test_read_sql_duplicate_columns(self): - # GH#53117 - df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": 1}) - df.to_sql(name="test_table", con=self.conn, index=False) + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + df.to_sql(name="d1187b08-4943-4c8d-a7f6", con=conn, index=False) - result = pd.read_sql("SELECT a, b, a +1 as a, c FROM test_table;", self.conn) - expected = 
-            [[1, 0.1, 2, 1], [2, 0.2, 3, 1], [3, 0.3, 4, 1]],
-            columns=["a", "b", "a", "c"],
-        )
-        tm.assert_frame_equal(result, expected)
+    if "postgres" in conn_name:
+        query = 'SELECT * FROM "d1187b08-4943-4c8d-a7f6"'
+    else:
+        query = "SELECT * FROM `d1187b08-4943-4c8d-a7f6`"
+    res = sql.read_sql_query(query, conn)
+
+    tm.assert_frame_equal(res, df)
+
+
+@pytest.mark.db
+@pytest.mark.parametrize("conn", all_connectable)
+def test_api_read_sql_duplicate_columns(conn, request):
+    # GH#53117
+    conn = request.getfixturevalue(conn)
+    if sql.has_table("test_table", conn):
+        with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL:
+            pandasSQL.drop_table("test_table")
+
+    df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": 1})
+    df.to_sql(name="test_table", con=conn, index=False)
+
+    result = pd.read_sql("SELECT a, b, a +1 as a, c FROM test_table;", conn)
+    expected = DataFrame(
+        [[1, 0.1, 2, 1], [2, 0.2, 3, 1], [3, 0.3, 4, 1]],
+        columns=["a", "b", "a", "c"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+class _TestSQLApi(PandasSQLTest):
+    """
+    Base class to test the public API.
+
+    From this, two classes are derived to run these tests for both the
+    sqlalchemy mode (`TestSQLApi`) and the fallback mode
+    (`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
+    tests for the different sql flavours are included in `_TestSQLAlchemy`.
+
+    Notes:
+    flavor can always be passed even in SQLAlchemy mode,
+    should be correctly ignored.
+
+    we don't use drop_table because that isn't part of the public api
+
+    """
+
+    flavor = "sqlite"
+    mode: str
+
+    @pytest.fixture(autouse=True)
+    def setup_method(self, iris_path, types_data):
+        self.conn = self.connect()
+        self.load_iris_data(iris_path)
+        self.load_types_data(types_data)
+        self.load_test_data_and_sql()
+
+    def load_test_data_and_sql(self):
+        create_and_load_iris_view(self.conn)
 
 @pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")

From d04747c367d00ee03c5d008ce5670892d450e801 Mon Sep 17 00:00:00 2001
From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com>
Date: Thu, 7 Sep 2023 12:32:52 -0400
Subject: [PATCH 39/93] REGR: DataFrameGroupBy.agg with duplicate column names
 and a dict (#55042)
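A minimal reproducer, distilled from the test added below (the frame and the
expected values come from that test; nothing else is assumed):

    import pandas as pd

    # duplicate column name "c"; aggregating with a dict used to take the
    # non-unique-columns path even under groupby and raise
    df = pd.DataFrame(
        [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],
        columns=["a", "b", "c", "c"],
    )
    result = df.groupby("a").agg({"b": "sum"})  # b -> 5 for a=1, 4 for a=2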
---
 doc/source/whatsnew/v2.1.1.rst                   |  1 +
 pandas/core/apply.py                             |  8 +++++++-
 pandas/tests/groupby/aggregate/test_aggregate.py | 12 ++++++++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index fe511b5cdec67..42af61be26355 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -21,6 +21,7 @@ Fixed regressions
 - Fixed regression in :meth:`DataFrame.__setitem__` raising ``AssertionError`` when setting a :class:`Series` with a partial :class:`MultiIndex` (:issue:`54875`)
 - Fixed regression in :meth:`DataFrame.filter` not respecting the order of elements for ``filter`` (:issue:`54980`)
 - Fixed regression in :meth:`DataFrame.to_sql` not roundtripping datetime columns correctly for sqlite (:issue:`54877`)
+- Fixed regression in :meth:`DataFrameGroupBy.agg` when aggregating a DataFrame with duplicate column names using a dictionary (:issue:`55006`)
 - Fixed regression in :meth:`MultiIndex.append` raising when appending overlapping :class:`IntervalIndex` levels (:issue:`54934`)
 - Fixed regression in :meth:`Series.drop_duplicates` for PyArrow strings (:issue:`54904`)
 - Fixed regression in :meth:`Series.interpolate` raising when ``fill_value`` was given (:issue:`54920`)
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 4d6dd8f4fd577..26467a4a982fa 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -436,7 +436,13 @@ def compute_dict_like(
             Data for result. When aggregating with a Series, this can contain any
             Python object.
         """
+        from pandas.core.groupby.generic import (
+            DataFrameGroupBy,
+            SeriesGroupBy,
+        )
+
         obj = self.obj
+        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
         func = cast(AggFuncTypeDict, self.func)
         func = self.normalize_dictlike_arg(op_name, selected_obj, func)
 
@@ -450,7 +456,7 @@ def compute_dict_like(
             colg = obj._gotitem(selection, ndim=1)
             results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
             keys = list(func.keys())
-        elif is_non_unique_col:
+        elif not is_groupby and is_non_unique_col:
             # key used for column selection and output
             # GH#51099
             results = []
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index c01ca4922a84b..882f42ff18bdd 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -515,6 +515,18 @@ def test_groupby_agg_dict_with_getitem():
     tm.assert_frame_equal(result, expected)
 
 
+def test_groupby_agg_dict_dup_columns():
+    # GH#55006
+    df = DataFrame(
+        [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],
+        columns=["a", "b", "c", "c"],
+    )
+    gb = df.groupby("a")
+    result = gb.agg({"b": "sum"})
+    expected = DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a"))
+    tm.assert_frame_equal(result, expected)
+
+
 @pytest.mark.parametrize(
     "op",
     [

From 3334832eb9a8dec8928e197a2930dba018dd6160 Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Fri, 8 Sep 2023 16:31:07 +0200
Subject: [PATCH 40/93] BLD: improvements to meson.build files (#54949)

* BLD: some changes to make meson.build more idiomatic

- Use `pure: false` only in a single place. This is recommended for
  robustness; this way you can't forget it in a subdirectory and end up
  with a subtly broken package only on niche Linux distros that split
  purelib and platlib directories.
- Use `py.install_sources` with a list input rather than in a foreach loop.
- Remove the `werror` comment: it's never a good idea to enable `-Werror`
  by default in the build config of a library, as that can easily break
  builds. This should be done in one or more CI jobs instead.

* BLD: run `generate_version.py` with a shebang, not 'python'

The way this was before can result in build failures. It assumed that
`python` is a working Python 3.x interpreter, and that is not always
true. See for example this bug report for the exact same thing in
NumPy, where `python` isn't working for Sage:
https://github.com/numpy/numpy/issues/24514

Meson guarantees that .py scripts with a shebang on the top line will
be run with a Python interpreter (if there's none on the PATH, it can
use the one Meson itself is run with). Hence this is the most robust
way of using `run_command` on a .py script.
---
 generate_version.py             |  2 ++
 meson.build                     | 16 +++++++++-------
 pandas/_libs/meson.build        |  7 ++++---
 pandas/_libs/tslibs/meson.build |  7 ++++---
 pandas/meson.build              |  9 +++------
 5 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/generate_version.py b/generate_version.py
index 46e9f52bfc5de..06e38ce0fd978 100644
--- a/generate_version.py
+++ b/generate_version.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 # Note: This file has to live next to setup.py or versioneer will not work
 import argparse
 import os
diff --git a/meson.build b/meson.build
index 09a1494135af4..e0e533ffade97 100644
--- a/meson.build
+++ b/meson.build
@@ -2,19 +2,17 @@ project(
     'pandas',
     'c', 'cpp', 'cython',
-    version: run_command(['python', 'generate_version.py', '--print'], check: true).stdout().strip(),
+    version: run_command(['generate_version.py', '--print'], check: true).stdout().strip(),
     license: 'BSD-3',
     meson_version: '>=1.0.1',
     default_options: [
         'buildtype=release',
-        # TODO: Reactivate werror, some warnings on Windows
-        #'werror=true',
         'c_std=c99'
     ]
 )
 
 fs = import('fs')
-py = import('python').find_installation()
+py = import('python').find_installation(pure: false)
 tempita = files('generate_pxi.py')
 versioneer = files('generate_version.py')
@@ -30,7 +28,7 @@ add_project_arguments('-DNPY_TARGET_VERSION=NPY_1_21_API_VERSION', language : 'c
 
 
 if fs.exists('_version_meson.py')
-  py.install_sources('_version_meson.py', pure: false, subdir: 'pandas')
+  py.install_sources('_version_meson.py', subdir: 'pandas')
 else
   custom_target('write_version_file',
     output: '_version_meson.py',
@@ -40,11 +38,15 @@ else
     build_by_default: true,
     build_always_stale: true,
     install: true,
-    install_dir: py.get_install_dir(pure: false) / 'pandas'
+    install_dir: py.get_install_dir() / 'pandas'
   )
   meson.add_dist_script(py, versioneer, '-o', '_version_meson.py')
 endif
 
 # Needed by pandas.test() when it looks for the pytest ini options
-py.install_sources('pyproject.toml', pure: false, subdir: 'pandas')
+py.install_sources(
+  'pyproject.toml',
+  subdir: 'pandas'
+)
+
 subdir('pandas')
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build
index c0a9d1ad8ee4a..1cf2c4343d844 100644
--- a/pandas/_libs/meson.build
+++ b/pandas/_libs/meson.build
@@ -114,8 +114,9 @@ foreach ext_name, ext_dict : libs_sources
   )
 endforeach
 
-py.install_sources('__init__.py',
-  pure: false,
-  subdir: 'pandas/_libs')
+py.install_sources(
+  '__init__.py',
+  subdir: 'pandas/_libs'
+)
 
 subdir('window')
diff --git a/pandas/_libs/tslibs/meson.build b/pandas/_libs/tslibs/meson.build
index 14d2eef46da20..167695b84514c 100644
--- a/pandas/_libs/tslibs/meson.build
+++ b/pandas/_libs/tslibs/meson.build
@@ -31,6 +31,7 @@ foreach ext_name, ext_dict : tslibs_sources
   )
 endforeach
 
-py.install_sources('__init__.py',
-  pure: false,
-  subdir: 'pandas/_libs/tslibs')
+py.install_sources(
+  '__init__.py',
+  subdir: 'pandas/_libs/tslibs'
+)
diff --git a/pandas/meson.build b/pandas/meson.build
index 1dc9955aa4ff6..f02258c98d46a 100644
--- a/pandas/meson.build
+++ b/pandas/meson.build
@@ -40,8 +40,9 @@ subdirs_list = [
     'util'
 ]
 foreach subdir: subdirs_list
-    install_subdir(subdir, install_dir: py.get_install_dir(pure: false) / 'pandas')
+    install_subdir(subdir, install_dir: py.get_install_dir() / 'pandas')
 endforeach
+
 top_level_py_list = [
     '__init__.py',
     '_typing.py',
@@ -49,8 +50,4 @@ top_level_py_list = [
     'conftest.py',
     'testing.py'
 ]
-foreach file: top_level_py_list
-    py.install_sources(file,
-        pure: false,
-        subdir: 'pandas')
-endforeach
+py.install_sources(top_level_py_list, subdir: 'pandas')

From 5c7abca175c92f2dab7331e9f63a58b4e0d29c7a Mon Sep 17 00:00:00 2001
From: Luke Manley
Date: Fri, 8 Sep 2023 12:41:42 -0400
Subject: [PATCH 41/93] Add TODO note to BlockManager.fast_xs for EA dtypes
 (#55039)

---
 pandas/core/internals/managers.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4cb7b610074ba..b1db2d2e708e8 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -969,6 +969,10 @@ def fast_xs(self, loc: int) -> SingleBlockManager:
 
         n = len(self)
 
         if isinstance(dtype, ExtensionDtype):
+            # TODO: use object dtype as workaround for non-performant
+            #  EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__
+            #  when iteratively setting individual values)
+            #  https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918
             result = np.empty(n, dtype=object)
         else:
             result = np.empty(n, dtype=dtype)

From 711fea0bdb216f9c489a70829393e0a26671d779 Mon Sep 17 00:00:00 2001
From: Michał Górny
Date: Fri, 8 Sep 2023 19:12:32 +0200
Subject: [PATCH 42/93] ENH: use shutil.which() instead of external which(1)
 (#54937)

* ENH: update bundled pyperclip with changes from 1.8.2 release

Copy the changes from upstream 1.8.2 to the bundled copy of pyperclip.
The code was reformatted using black and verified using ruff. The
existing modifications from pandas were preserved.

* ENH: Remove Python 2 compatibility from imported pyperclip code

Remove the fallback to which/where that is only necessary for Python 2,
which does not feature shutil.which(). Also collapse the imports to
avoid importing shutil.which() twice. It is now only imported as
`_executable_exists()` to minimize the changes to the original code.

* BUG: Fix pylint failure (redundant `pass`) in clipboard
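The heart of the change is swapping a subprocess call to which(1)/where for the
standard library; a minimal sketch of the new check (simplified from the diff
below, not the full bundled module):

    from shutil import which as _executable_exists

    # resolves an executable such as "xclip" on PATH without spawning a
    # platform-specific which/where process
    if _executable_exists("xclip"):
        print("xclip is available")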
---
 pandas/io/clipboard/__init__.py | 111 ++++++++++++++++++++++++++------
 1 file changed, 90 insertions(+), 21 deletions(-)

diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 806d42381afc6..6491849925e86 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -17,9 +17,12 @@
 On Windows, no additional modules are needed.
 On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
     commands. (These commands should come with OS X.).
-On Linux, install xclip or xsel via package manager. For example, in Debian:
+On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via
+package manager.
+For example, in Debian:
     sudo apt-get install xclip
     sudo apt-get install xsel
+    sudo apt-get install wl-clipboard
 
 Otherwise on Linux, you will need the PyQt5 modules installed.
 
@@ -28,12 +31,11 @@
 Cygwin is currently not supported.
 
 Security Note: This module runs programs with these names:
-    - which
-    - where
     - pbcopy
     - pbpaste
     - xclip
     - xsel
+    - wl-copy/wl-paste
     - klipper
     - qdbus
 A malicious user could rename or add programs with these names, tricking
 """
 
-__version__ = "1.7.0"
+__version__ = "1.8.2"
 
 import contextlib
@@ -55,7 +57,7 @@
 )
 import os
 import platform
-from shutil import which
+from shutil import which as _executable_exists
 import subprocess
 import time
 import warnings
@@ -74,25 +76,14 @@
 
 EXCEPT_MSG = """
     Pyperclip could not find a copy/paste mechanism for your system.
     For more information, please visit
-    https://pyperclip.readthedocs.io/en/latest/#not-implemented-error
+    https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error
 """
 
 ENCODING = "utf-8"
 
-# The "which" unix command finds where a command is.
-if platform.system() == "Windows":
-    WHICH_CMD = "where"
-else:
-    WHICH_CMD = "which"
 
-def _executable_exists(name):
-    return (
-        subprocess.call(
-            [WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE
-        )
-        == 0
-    )
+class PyperclipTimeoutException(PyperclipException):
+    pass
 
 
 def _stringifyText(text) -> str:
@@ -229,6 +220,32 @@ def paste_xsel(primary=False):
     return copy_xsel, paste_xsel
 
 
+def init_wl_clipboard():
+    PRIMARY_SELECTION = "-p"
+
+    def copy_wl(text, primary=False):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        args = ["wl-copy"]
+        if primary:
+            args.append(PRIMARY_SELECTION)
+        if not text:
+            args.append("--clear")
+            subprocess.check_call(args, close_fds=True)
+        else:
+            p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True)
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_wl(primary=False):
+        args = ["wl-paste", "-n"]
+        if primary:
+            args.append(PRIMARY_SELECTION)
+        p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True)
+        stdout, _stderr = p.communicate()
+        return stdout.decode(ENCODING)
+
+    return copy_wl, paste_wl
+
+
 def init_klipper_clipboard():
     def copy_klipper(text):
         text = _stringifyText(text)  # Converts non-str values to str.
@@ -534,7 +551,7 @@ def determine_clipboard():
         return init_windows_clipboard()
 
     if platform.system() == "Linux":
-        if which("wslconfig.exe"):
+        if _executable_exists("wslconfig.exe"):
             return init_wsl_clipboard()
 
     # Setup for the macOS platform:
@@ -549,6 +566,8 @@ def determine_clipboard():
 
     # Setup for the LINUX platform:
     if HAS_DISPLAY:
+        if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):
+            return init_wl_clipboard()
         if _executable_exists("xsel"):
             return init_xsel_clipboard()
         if _executable_exists("xclip"):
@@ -602,6 +621,7 @@ def set_clipboard(clipboard):
         "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
         "xclip": init_xclip_clipboard,
         "xsel": init_xsel_clipboard,
+        "wl-clipboard": init_wl_clipboard,
         "klipper": init_klipper_clipboard,
         "windows": init_windows_clipboard,
         "no": init_no_clipboard,
@@ -671,7 +691,56 @@ def is_available() -> bool:
 copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
 
 
-__all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"]
+def waitForPaste(timeout=None):
+    """This function call blocks until a non-empty text string exists on the
+    clipboard. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout was set to
+    a number of seconds that has elapsed without non-empty text being put on
+    the clipboard."""
+    startTime = time.time()
+    while True:
+        clipboardText = paste()
+        if clipboardText != "":
+            return clipboardText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForPaste() timed out after " + str(timeout) + " seconds."
+            )
+
+
+def waitForNewPaste(timeout=None):
+    """This function call blocks until a new text string exists on the
+    clipboard that is different from the text that was there when the function
+    was first called. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout was set to
+    a number of seconds that has elapsed without non-empty text being put on
+    the clipboard."""
+    startTime = time.time()
+    originalText = paste()
+    while True:
+        currentText = paste()
+        if currentText != originalText:
+            return currentText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForNewPaste() timed out after " + str(timeout) + " seconds."
+            )
+
+
+__all__ = [
+    "copy",
+    "paste",
+    "waitForPaste",
+    "waitForNewPaste",
+    "set_clipboard",
+    "determine_clipboard",
+]
 
 # pandas aliases
 clipboard_get = paste

From 5cf6e74e198ba34c8f64bed32082fddc2292986d Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Fri, 8 Sep 2023 10:32:20 -1000
Subject: [PATCH 43/93] BUG: read_csv(on_bad_lines='warn') did not raise a
 Python warning (#55071)
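A short sketch of the user-visible change (the malformed CSV is taken from the
updated tests; the warnings-module handling is illustrative):

    import warnings
    from io import StringIO

    import pandas as pd

    data = "a\n1\n1,2,3\n4\n5,6,7"  # lines 3 and 5 are malformed
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        df = pd.read_csv(StringIO(data), on_bad_lines="warn")
    # the skipped-line messages used to go to stderr; they now surface as
    # catchable pandas.errors.ParserWarning instances
    assert any("Skipping line" in str(w.message) for w in caught)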
{row_num}: " - sys.stderr.write(base + msg + "\n") + warnings.warn( + f"Skipping line {row_num}: {msg}\n", + ParserWarning, + stacklevel=find_stack_level(), + ) def _next_iter_line(self, row_num: int) -> list[Scalar] | None: """ diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py index 492b4d5ec058e..0c5a2e0d04e5a 100644 --- a/pandas/tests/io/parser/common/test_read_errors.py +++ b/pandas/tests/io/parser/common/test_read_errors.py @@ -15,6 +15,7 @@ from pandas.errors import ( EmptyDataError, ParserError, + ParserWarning, ) from pandas import DataFrame @@ -129,18 +130,16 @@ def test_unexpected_keyword_parameter_exception(all_parsers): parser.read_table("foo.tsv", foo=1) -def test_suppress_error_output(all_parsers, capsys): +def test_suppress_error_output(all_parsers): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) - result = parser.read_csv(StringIO(data), on_bad_lines="skip") + with tm.assert_produces_warning(None): + result = parser.read_csv(StringIO(data), on_bad_lines="skip") tm.assert_frame_equal(result, expected) - captured = capsys.readouterr() - assert captured.err == "" - def test_error_bad_lines(all_parsers): # see gh-15925 @@ -152,19 +151,18 @@ def test_error_bad_lines(all_parsers): parser.read_csv(StringIO(data), on_bad_lines="error") -def test_warn_bad_lines(all_parsers, capsys): +def test_warn_bad_lines(all_parsers): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) - result = parser.read_csv(StringIO(data), on_bad_lines="warn") + with tm.assert_produces_warning( + ParserWarning, match="Skipping line", check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), on_bad_lines="warn") tm.assert_frame_equal(result, expected) - captured = capsys.readouterr() - assert "Skipping line 3" in captured.err - assert "Skipping line 5" in captured.err - def test_read_csv_wrong_num_columns(all_parsers): # Too few columns. 
@@ -245,7 +243,7 @@ def test_bad_header_uniform_error(all_parsers):
         parser.read_csv(StringIO(data), index_col=0, on_bad_lines="error")
 
 
-def test_on_bad_lines_warn_correct_formatting(all_parsers, capsys):
+def test_on_bad_lines_warn_correct_formatting(all_parsers):
     # see gh-15925
     parser = all_parsers
     data = """1,2
@@ -256,17 +254,8 @@ def test_on_bad_lines_warn_correct_formatting(all_parsers, capsys):
     """
     expected = DataFrame({"1": "a", "2": ["b"] * 2})
 
-    result = parser.read_csv(StringIO(data), on_bad_lines="warn")
+    with tm.assert_produces_warning(
+        ParserWarning, match="Skipping line", check_stacklevel=False
+    ):
+        result = parser.read_csv(StringIO(data), on_bad_lines="warn")
     tm.assert_frame_equal(result, expected)
-
-    captured = capsys.readouterr()
-    if parser.engine == "c":
-        warn = """Skipping line 3: expected 2 fields, saw 3
-Skipping line 4: expected 2 fields, saw 3
-
-"""
-    else:
-        warn = """Skipping line 3: Expected 2 fields in line 3, saw 3
-Skipping line 4: Expected 2 fields in line 4, saw 3
-"""
-    assert captured.err == warn
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 32a010b3aeb34..18eee01f87621 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -19,7 +19,10 @@
 from pandas.compat import is_ci_environment
 from pandas.compat.numpy import np_version_gte1p24
-from pandas.errors import ParserError
+from pandas.errors import (
+    ParserError,
+    ParserWarning,
+)
 import pandas.util._test_decorators as td
 
 from pandas import (
@@ -461,7 +464,7 @@ def test_data_after_quote(c_parser_only):
     tm.assert_frame_equal(result, expected)
 
 
-def test_comment_whitespace_delimited(c_parser_only, capsys):
+def test_comment_whitespace_delimited(c_parser_only):
     parser = c_parser_only
     test_input = """\
 1 2
@@ -474,18 +477,17 @@ def test_comment_whitespace_delimited(c_parser_only, capsys):
 8# 1 field, NaN
 9 2 3 # skipped line
 # comment"""
-    df = parser.read_csv(
-        StringIO(test_input),
-        comment="#",
-        header=None,
-        delimiter="\\s+",
-        skiprows=0,
-        on_bad_lines="warn",
-    )
-    captured = capsys.readouterr()
-    # skipped lines 2, 3, 4, 9
-    for line_num in (2, 3, 4, 9):
-        assert f"Skipping line {line_num}" in captured.err
+    with tm.assert_produces_warning(
+        ParserWarning, match="Skipping line", check_stacklevel=False
+    ):
+        df = parser.read_csv(
+            StringIO(test_input),
+            comment="#",
+            header=None,
+            delimiter="\\s+",
+            skiprows=0,
+            on_bad_lines="warn",
+        )
     expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
     tm.assert_frame_equal(df, expected)
 
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index 959b988e208c1..dbd474c6ae0b9 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -274,7 +274,7 @@ def test_multi_char_sep_quotes(python_parser_only, quoting):
         parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
 
 
-def test_none_delimiter(python_parser_only, capsys):
+def test_none_delimiter(python_parser_only):
     # see gh-13374 and gh-17465
     parser = python_parser_only
     data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
@@ -283,12 +283,14 @@ def test_none_delimiter(python_parser_only, capsys):
     # We expect the third line in the data to be
     # skipped because it is malformed, but we do
     # not expect any errors to occur.
-    result = parser.read_csv(StringIO(data), header=0, sep=None, on_bad_lines="warn")
+    with tm.assert_produces_warning(
+        ParserWarning, match="Skipping line 3", check_stacklevel=False
+    ):
+        result = parser.read_csv(
+            StringIO(data), header=0, sep=None, on_bad_lines="warn"
+        )
     tm.assert_frame_equal(result, expected)
 
-    captured = capsys.readouterr()
-    assert "Skipping line 3" in captured.err
-
 
 @pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])
 @pytest.mark.parametrize("skipfooter", [0, 1])
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index f150ed3903443..e2d785a38eb51 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -12,6 +12,7 @@
 
 import pandas._libs.parsers as parser
 from pandas._libs.parsers import TextReader
+from pandas.errors import ParserWarning
 
 from pandas import DataFrame
 import pandas._testing as tm
@@ -125,7 +126,7 @@ def test_integer_thousands_alt(self):
         expected = DataFrame([123456, 12500])
         tm.assert_frame_equal(result, expected)
 
-    def test_skip_bad_lines(self, capsys):
+    def test_skip_bad_lines(self):
         # too many lines, see #2430 for why
         data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"
 
@@ -145,14 +146,11 @@ def test_skip_bad_lines(self, capsys):
         }
         assert_array_dicts_equal(result, expected)
 
-        reader = TextReader(
-            StringIO(data), delimiter=":", header=None, on_bad_lines=1  # Warn
-        )
-        reader.read()
-        captured = capsys.readouterr()
-
-        assert "Skipping line 4" in captured.err
-        assert "Skipping line 6" in captured.err
+        with tm.assert_produces_warning(ParserWarning, match="Skipping line"):
+            reader = TextReader(
+                StringIO(data), delimiter=":", header=None, on_bad_lines=1  # Warn
+            )
+            reader.read()
 
     def test_header_not_enough_lines(self):
         data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"

From 9aa3f95a87eab4afe5dbd8cba8cd99432ec12899 Mon Sep 17 00:00:00 2001
From: Ben Greiner
Date: Fri, 8 Sep 2023 22:33:47 +0200
Subject: [PATCH 44/93] COMPAT: bump pyarrow min version for div on duration
 (#55048)

COMPAT: bump pyarrow min version test for div on duration
---
 pandas/compat/__init__.py            | 2 ++
 pandas/compat/pyarrow.py             | 2 ++
 pandas/tests/extension/test_arrow.py | 3 ++-
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index be0a762642e46..684e9dccdc0f9 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -30,6 +30,7 @@
     pa_version_under9p0,
     pa_version_under11p0,
     pa_version_under13p0,
+    pa_version_under14p0,
 )
 
 if TYPE_CHECKING:
@@ -186,6 +187,7 @@ def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
     "pa_version_under9p0",
     "pa_version_under11p0",
     "pa_version_under13p0",
+    "pa_version_under14p0",
     "IS64",
     "ISMUSL",
     "PY310",
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index 049ce50920e28..12f58be109d98 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -15,6 +15,7 @@
     pa_version_under11p0 = _palv < Version("11.0.0")
     pa_version_under12p0 = _palv < Version("12.0.0")
     pa_version_under13p0 = _palv < Version("13.0.0")
+    pa_version_under14p0 = _palv < Version("14.0.0")
 except ImportError:
     pa_version_under7p0 = True
     pa_version_under8p0 = True
@@ -23,3 +24,4 @@
     pa_version_under11p0 = True
     pa_version_under12p0 = True
     pa_version_under13p0 = True
+    pa_version_under14p0 = True
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index fa6e85ba204d2..2e98eea3cac8a 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -40,6 +40,7 @@
     pa_version_under9p0,
     pa_version_under11p0,
     pa_version_under13p0,
+    pa_version_under14p0,
 )
 
 from pandas.core.dtypes.dtypes import (
@@ -917,7 +918,7 @@ def _is_temporal_supported(self, opname, pa_dtype):
             or (
                 opname in ("__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__")
-                and not pa_version_under13p0
+                and not pa_version_under14p0
             )
         ) and pa.types.is_duration(pa_dtype)

From 723feb984e6516e3e1798d3c4440c844b12ea18f Mon Sep 17 00:00:00 2001
From: Matheus Felipe
Date: Sat, 9 Sep 2023 17:44:39 -0300
Subject: [PATCH 45/93] TYP/DOC: fix flavor param with incorrect type hint in
 read_html (#55076)

resolve #55059
---
 pandas/io/html.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/pandas/io/html.py b/pandas/io/html.py
index 10701be4f7e0b..68d30fe5ba681 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -1033,7 +1033,7 @@ def read_html(
     io: FilePath | ReadBuffer[str],
     *,
     match: str | Pattern = ".+",
-    flavor: str | None = None,
+    flavor: str | Sequence[str] | None = None,
     header: int | Sequence[int] | None = None,
     index_col: int | Sequence[int] | None = None,
     skiprows: int | Sequence[int] | slice | None = None,
@@ -1074,11 +1074,11 @@ def read_html(
         This value is converted to a regular expression so that there is consistent
         behavior between Beautiful Soup and lxml.
 
-    flavor : str, optional
-        The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
-        each other, they are both there for backwards compatibility. The
-        default of ``None`` tries to use ``lxml`` to parse and if that fails it
-        falls back on ``bs4`` + ``html5lib``.
+    flavor : str or list-like, optional
+        The parsing engine (or list of parsing engines) to use. 'bs4' and
+        'html5lib' are synonymous with each other, they are both there for
+        backwards compatibility. The default of ``None`` tries to use ``lxml``
+        to parse and if that fails it falls back on ``bs4`` + ``html5lib``.
 
     header : int or list-like, optional
         The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
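A short sketch of what the corrected annotation in the patch above describes
(HTML snippet invented for illustration; lxml and/or bs4+html5lib must be
installed at runtime):

    from io import StringIO

    import pandas as pd

    html = "<table><tr><th>a</th></tr><tr><td>1</td></tr></table>"
    # a plain string was always accepted; the hint now also covers the
    # list-of-fallback-engines form that read_html has long supported
    dfs = pd.read_html(StringIO(html), flavor=["lxml", "bs4"])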
From 20e87cb343a18d6af4085ab6b368d433ca07be8d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 11 Sep 2023 08:23:09 -0700
Subject: [PATCH 46/93] Bump actions/checkout from 3 to 4 (#55086)

Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/code-checks.yml         | 8 ++++----
 .github/workflows/codeql.yml              | 2 +-
 .github/workflows/comment-commands.yml    | 2 +-
 .github/workflows/docbuild-and-upload.yml | 2 +-
 .github/workflows/package-checks.yml      | 4 ++--
 .github/workflows/unit-tests.yml          | 6 +++---
 .github/workflows/wheels.yml              | 4 ++--
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index f87aef5385898..3bd68c07dcbc3 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -33,7 +33,7 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -109,7 +109,7 @@ jobs:
 
     steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -143,7 +143,7 @@ jobs:
        run: docker image prune -f
 
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -164,7 +164,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 8715c5306a3b0..2182e89731990 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -27,7 +27,7 @@ jobs:
          - python
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
diff --git a/.github/workflows/comment-commands.yml b/.github/workflows/comment-commands.yml
index 2550d4de34a45..55dd733d25b50 100644
--- a/.github/workflows/comment-commands.yml
+++ b/.github/workflows/comment-commands.yml
@@ -51,7 +51,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index e05f12ac6416a..deaf2be0a0423 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -36,7 +36,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml
index 04abcf4ce8816..64a94d7fde5a9 100644
--- a/.github/workflows/package-checks.yml
+++ b/.github/workflows/package-checks.yml
@@ -34,7 +34,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -62,7 +62,7 @@ jobs:
      cancel-in-progress: true
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 6410f2edd6175..f2b426269098b 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -136,7 +136,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -194,7 +194,7 @@ jobs:
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -330,7 +330,7 @@ jobs:
      PYTEST_TARGET: pandas
 
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 97d78a1a9afe3..83d14b51092e6 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -48,7 +48,7 @@ jobs:
       sdist_file: ${{ steps.save-path.outputs.sdist_name }}
     steps:
       - name: Checkout pandas
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -103,7 +103,7 @@ jobs:
       IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
     steps:
       - name: Checkout pandas
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0

From a0d4725dd14176602761a3b8edd7d6c0ce41aa08 Mon Sep 17 00:00:00 2001
From: Luke Manley
Date: Mon, 11 Sep 2023 11:29:43 -0400
Subject: [PATCH 47/93] BUG: concat(axis=1) ignoring sort parameter for
 DatetimeIndex (#55085)

BUG: concat ignoring sort parameter for DatetimeIndex
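A minimal sketch of the behavior being restored (frames simplified from the
updated test below; exact values are illustrative):

    import pandas as pd

    df1 = pd.DataFrame({"a": [1, 2, 3]}, index=pd.date_range("2011-01-02", periods=3))
    df2 = pd.DataFrame({"b": [1, 2, 3]}, index=pd.date_range("2011-01-01", periods=3))
    # with sort=False the union of the DatetimeIndexes should keep appearance
    # order; the removed `sort = True` line forced sorting regardless
    result = pd.concat([df1, df2], axis=1, sort=False)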
indexed by a :class:`MultiIndex` (:issue:`54835`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) +- .. --------------------------------------------------------------------------- .. _whatsnew_220.bug_fixes: diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 6a36021d9e7c5..877b8edb32520 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -239,8 +239,12 @@ def _unique_indices(inds, dtype) -> Index: Index """ if all(isinstance(ind, Index) for ind in inds): - result = inds[0].append(inds[1:]).unique() - result = result.astype(dtype, copy=False) + inds = [ind.astype(dtype, copy=False) for ind in inds] + result = inds[0].unique() + other = inds[1].append(inds[2:]) + diff = other[result.get_indexer_for(other) == -1] + if len(diff): + result = result.append(diff.unique()) if sort: result = result.sort_values() return result From debf5ace2e63b09901ec11bc9d533a9e9b40545c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Mon, 11 Sep 2023 12:13:42 -0400 Subject: [PATCH 49/93] TYP: Misc type corrections (#55078) --- pandas/_libs/tslibs/period.pyi | 2 +- pandas/_libs/tslibs/timedeltas.pyi | 7 +++-- pandas/_libs/tslibs/timestamps.pyi | 29 +++++++++-------- pandas/_typing.py | 2 +- pandas/core/reshape/pivot.py | 3 +- pandas/io/parsers/readers.py | 50 ++++++++++++++++++++++++------ 6 files changed, 64 insertions(+), 29 deletions(-) diff --git a/pandas/_libs/tslibs/period.pyi b/pandas/_libs/tslibs/period.pyi index 8826757e31c32..c85865fea8fd0 100644 --- a/pandas/_libs/tslibs/period.pyi +++ b/pandas/_libs/tslibs/period.pyi @@ -89,7 +89,7 @@ class Period(PeriodMixin): @classmethod def _from_ordinal(cls, ordinal: int, freq) -> Period: ... @classmethod - def now(cls, freq: BaseOffset = ...) -> Period: ... + def now(cls, freq: Frequency = ...) -> Period: ... def strftime(self, fmt: str) -> str: ... def to_timestamp( self, diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi index aba9b25b23154..6d993722ce1d4 100644 --- a/pandas/_libs/tslibs/timedeltas.pyi +++ b/pandas/_libs/tslibs/timedeltas.pyi @@ -14,6 +14,7 @@ from pandas._libs.tslibs import ( Tick, ) from pandas._typing import ( + Frequency, Self, npt, ) @@ -117,9 +118,9 @@ class Timedelta(timedelta): @property def asm8(self) -> np.timedelta64: ... # TODO: round/floor/ceil could return NaT? - def round(self, freq: str) -> Self: ... - def floor(self, freq: str) -> Self: ... - def ceil(self, freq: str) -> Self: ... + def round(self, freq: Frequency) -> Self: ... + def floor(self, freq: Frequency) -> Self: ... + def ceil(self, freq: Frequency) -> Self: ... @property def resolution_string(self) -> str: ... def __add__(self, other: timedelta) -> Timedelta: ... diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index 36ae2d6d892f1..e23f01b800874 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -8,6 +8,8 @@ from datetime import ( from time import struct_time from typing import ( ClassVar, + Literal, + TypeAlias, TypeVar, overload, ) @@ -27,6 +29,7 @@ from pandas._typing import ( ) _DatetimeT = TypeVar("_DatetimeT", bound=datetime) +_TimeZones: TypeAlias = str | _tzinfo | None | int def integer_op_not_supported(obj: object) -> TypeError: ... 
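# --- Illustrative sketch (hedged example, not part of the patch above) ---
# The stub changes in this patch type ``tz`` parameters through the new
# ``_TimeZones`` alias (str | tzinfo | None | int) and ``round``/``floor``/
# ``ceil`` through ``Frequency``. A minimal runnable check of those call
# shapes, using only public pandas API (values below are made up):
import datetime as dt

import pandas as pd

ts = pd.Timestamp.now(tz="UTC")          # tz given as str
ts_utc = ts.astimezone(dt.timezone.utc)  # tz given as a tzinfo instance
localized = pd.Timestamp("2023-09-11 12:00").tz_localize(
    "Europe/Berlin", ambiguous="raise"   # Literal value per the new stub
)
rounded = pd.Timedelta("90s").round("min")  # Frequency given as str
print(ts, ts_utc, localized, rounded, sep="\n")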
@@ -51,13 +54,13 @@ class Timestamp(datetime): tzinfo: _tzinfo | None = ..., *, nanosecond: int | None = ..., - tz: str | _tzinfo | None | int = ..., + tz: _TimeZones = ..., unit: str | int | None = ..., fold: int | None = ..., ) -> _DatetimeT | NaTType: ... @classmethod def _from_value_and_reso( - cls, value: int, reso: int, tz: _tzinfo | None + cls, value: int, reso: int, tz: _TimeZones ) -> Timestamp: ... @property def value(self) -> int: ... # np.int64 @@ -84,19 +87,19 @@ class Timestamp(datetime): @property def fold(self) -> int: ... @classmethod - def fromtimestamp(cls, ts: float, tz: _tzinfo | None = ...) -> Self: ... + def fromtimestamp(cls, ts: float, tz: _TimeZones = ...) -> Self: ... @classmethod def utcfromtimestamp(cls, ts: float) -> Self: ... @classmethod - def today(cls, tz: _tzinfo | str | None = ...) -> Self: ... + def today(cls, tz: _TimeZones = ...) -> Self: ... @classmethod def fromordinal( cls, ordinal: int, - tz: _tzinfo | str | None = ..., + tz: _TimeZones = ..., ) -> Self: ... @classmethod - def now(cls, tz: _tzinfo | str | None = ...) -> Self: ... + def now(cls, tz: _TimeZones = ...) -> Self: ... @classmethod def utcnow(cls) -> Self: ... # error: Signature of "combine" incompatible with supertype "datetime" @@ -131,7 +134,7 @@ class Timestamp(datetime): fold: int | None = ..., ) -> Self: ... # LSP violation: datetime.datetime.astimezone has a default value for tz - def astimezone(self, tz: _tzinfo | None) -> Self: ... # type: ignore[override] + def astimezone(self, tz: _TimeZones) -> Self: ... # type: ignore[override] def ctime(self) -> str: ... def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... @classmethod @@ -184,12 +187,12 @@ class Timestamp(datetime): def to_julian_date(self) -> np.float64: ... @property def asm8(self) -> np.datetime64: ... - def tz_convert(self, tz: _tzinfo | str | None) -> Self: ... + def tz_convert(self, tz: _TimeZones) -> Self: ... # TODO: could return NaT? def tz_localize( self, - tz: _tzinfo | str | None, - ambiguous: str = ..., + tz: _TimeZones, + ambiguous: bool | Literal["raise", "NaT"] = ..., nonexistent: TimestampNonexistent = ..., ) -> Self: ... def normalize(self) -> Self: ... @@ -197,19 +200,19 @@ class Timestamp(datetime): def round( self, freq: str, - ambiguous: bool | str = ..., + ambiguous: bool | Literal["raise", "NaT"] = ..., nonexistent: TimestampNonexistent = ..., ) -> Self: ... def floor( self, freq: str, - ambiguous: bool | str = ..., + ambiguous: bool | Literal["raise", "NaT"] = ..., nonexistent: TimestampNonexistent = ..., ) -> Self: ... def ceil( self, freq: str, - ambiguous: bool | str = ..., + ambiguous: bool | Literal["raise", "NaT"] = ..., nonexistent: TimestampNonexistent = ..., ) -> Self: ... def day_name(self, locale: str | None = ...) -> str: ... diff --git a/pandas/_typing.py b/pandas/_typing.py index 743815b91210d..c2bbebfbe2857 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -112,7 +112,7 @@ # Cannot use `Sequence` because a string is a sequence, and we don't want to # accept that. 
Could refine if https://github.com/python/typing/issues/256 is # resolved to differentiate between Sequence[str] and str -ListLike = Union[AnyArrayLike, list, range] +ListLike = Union[AnyArrayLike, list, tuple, range] # scalars diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index e8ca520e7b420..79354fdd12a2d 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -7,6 +7,7 @@ from typing import ( TYPE_CHECKING, Callable, + Literal, cast, ) @@ -569,7 +570,7 @@ def crosstab( margins: bool = False, margins_name: Hashable = "All", dropna: bool = True, - normalize: bool = False, + normalize: bool | Literal[0, 1, "all", "index", "columns"] = False, ) -> DataFrame: """ Compute a simple cross tabulation of two (or more) factors. diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index e0f171035e89e..e826aad478059 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -638,7 +638,10 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -697,7 +700,10 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -757,7 +763,10 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -817,7 +826,10 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -888,7 +900,10 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = "infer", names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, index_col: IndexLabel | Literal[False] | None = None, - usecols: list[HashableT] | Callable[[Hashable], bool] | None = None, + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = None, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, @@ -983,7 +998,10 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = 
..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1040,7 +1058,10 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1097,7 +1118,10 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1154,7 +1178,10 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1224,7 +1251,10 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = "infer", names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, index_col: IndexLabel | Literal[False] | None = None, - usecols: list[HashableT] | Callable[[Hashable], bool] | None = None, + usecols: list[HashableT] + | tuple[HashableT] + | Callable[[Hashable], bool] + | None = None, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, From aadd9e3a13660a7ac0b11730130447e5b07c01d1 Mon Sep 17 00:00:00 2001 From: Natalia Mokeeva <91160475+natmokval@users.noreply.github.com> Date: Mon, 11 Sep 2023 18:14:59 +0200 Subject: [PATCH 50/93] DOC: fix an example which raises an Error in whatsnew/v0.10.0.rst (#55057) * fix an example in whatsnew/v0.10.0.rst * correct the example in v0.10.0.rst --- doc/source/whatsnew/v0.10.0.rst | 43 +++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index 3425986a37743..422efc1b36946 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -180,19 +180,36 @@ labeled the aggregated group with the end of the interval: the next day). DataFrame constructor with no columns specified. The v0.9.0 behavior (names ``X0``, ``X1``, ...) can be reproduced by specifying ``prefix='X'``: -.. ipython:: python - :okexcept: - - import io - - data = """ - a,b,c - 1,Yes,2 - 3,No,4 - """ - print(data) - pd.read_csv(io.StringIO(data), header=None) - pd.read_csv(io.StringIO(data), header=None, prefix="X") +.. 
code-block:: ipython + + In [6]: import io + + In [7]: data = """ + ...: a,b,c + ...: 1,Yes,2 + ...: 3,No,4 + ...: """ + ...: + + In [8]: print(data) + + a,b,c + 1,Yes,2 + 3,No,4 + + In [9]: pd.read_csv(io.StringIO(data), header=None) + Out[9]: + 0 1 2 + 0 a b c + 1 1 Yes 2 + 2 3 No 4 + + In [10]: pd.read_csv(io.StringIO(data), header=None, prefix="X") + Out[10]: + X0 X1 X2 + 0 a b c + 1 1 Yes 2 + 2 3 No 4 - Values like ``'Yes'`` and ``'No'`` are not interpreted as boolean by default, though this can be controlled by new ``true_values`` and ``false_values`` From ce5fdf0f55f47014240931a1f975f65767c2442a Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Mon, 11 Sep 2023 12:34:04 -0400 Subject: [PATCH 51/93] ENH: numba engine in df.apply (#54666) * ENH: numba engine in df.apply * fixes * more fixes * try to fix * address code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * go for green * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update type --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/core/_numba/executor.py | 39 ++++++++++++++++ pandas/core/apply.py | 37 +++++++++++++-- pandas/core/frame.py | 33 ++++++++++++++ pandas/tests/apply/test_frame_apply.py | 62 ++++++++++++++++++++------ 5 files changed, 155 insertions(+), 18 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index e5ce0893c947b..07be496a95adc 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -28,7 +28,7 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ -- +- DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/_numba/executor.py b/pandas/core/_numba/executor.py index 5cd4779907146..0a26acb7df60a 100644 --- a/pandas/core/_numba/executor.py +++ b/pandas/core/_numba/executor.py @@ -15,6 +15,45 @@ from pandas.compat._optional import import_optional_dependency +@functools.cache +def generate_apply_looper(func, nopython=True, nogil=True, parallel=False): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + nb_compat_func = numba.extending.register_jitable(func) + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def nb_looper(values, axis): + # Operate on the first row/col in order to get + # the output shape + if axis == 0: + first_elem = values[:, 0] + dim0 = values.shape[1] + else: + first_elem = values[0] + dim0 = values.shape[0] + res0 = nb_compat_func(first_elem) + # Use np.asarray to get shape for + # https://github.com/numba/numba/issues/4202#issuecomment-1185981507 + buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape + if axis == 0: + buf_shape = buf_shape[::-1] + buff = np.empty(buf_shape) + + if axis == 1: + buff[0] = res0 + for i in numba.prange(1, values.shape[0]): + buff[i] = nb_compat_func(values[i]) + else: + buff[:, 0] = res0 + for j in numba.prange(1, values.shape[1]): + buff[:, j] = nb_compat_func(values[:, j]) + return buff + + return nb_looper + + @functools.cache def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel): if TYPE_CHECKING: diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 26467a4a982fa..78d52ed262c7a 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -49,6 +49,7 @@ ABCSeries, ) +from pandas.core._numba.executor import generate_apply_looper import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike @@ -80,6 +81,8 @@ def frame_apply( raw: bool = False, result_type: str | None = None, by_row: Literal[False, "compat"] = "compat", + engine: str = "python", + engine_kwargs: dict[str, bool] | None = None, args=None, kwargs=None, ) -> FrameApply: @@ -100,6 +103,8 @@ def frame_apply( raw=raw, result_type=result_type, by_row=by_row, + engine=engine, + engine_kwargs=engine_kwargs, args=args, kwargs=kwargs, ) @@ -756,11 +761,15 @@ def __init__( result_type: str | None, *, by_row: Literal[False, "compat"] = False, + engine: str = "python", + engine_kwargs: dict[str, bool] | None = None, args, kwargs, ) -> None: if by_row is not False and by_row != "compat": raise ValueError(f"by_row={by_row} not allowed") + self.engine = engine + self.engine_kwargs = engine_kwargs super().__init__( obj, func, raw, result_type, by_row=by_row, args=args, kwargs=kwargs ) @@ -805,6 +814,12 @@ def values(self): def apply(self) -> DataFrame | Series: """compute the results""" + + if self.engine == "numba" and not self.raw: + raise ValueError( + "The numba engine in DataFrame.apply can only be used when raw=True" + ) + # dispatch to handle list-like or dict-like if is_list_like(self.func): return self.apply_list_or_dict_like() @@ -834,7 +849,7 @@ def apply(self) -> DataFrame | Series: # raw elif self.raw: - return self.apply_raw() + return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs) return self.apply_standard() @@ -907,7 +922,7 @@ def apply_empty_result(self): else: return self.obj.copy() - def apply_raw(self): + def apply_raw(self, engine="python", engine_kwargs=None): """apply to the values as a numpy array""" def 
wrap_function(func): @@ -925,7 +940,23 @@ def wrapper(*args, **kwargs): return wrapper - result = np.apply_along_axis(wrap_function(self.func), self.axis, self.values) + if engine == "numba": + engine_kwargs = {} if engine_kwargs is None else engine_kwargs + + # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has + # incompatible type "Callable[..., Any] | str | list[Callable + # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str | + # list[Callable[..., Any] | str]]"; expected "Hashable" + nb_looper = generate_apply_looper( + self.func, **engine_kwargs # type: ignore[arg-type] + ) + result = nb_looper(self.values, self.axis) + # If we made the result 2-D, squeeze it back to 1-D + result = np.squeeze(result) + else: + result = np.apply_along_axis( + wrap_function(self.func), self.axis, self.values + ) # TODO: mixed type case if result.ndim == 2: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f1fc63bc4b1ea..8fcb91c846826 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9925,6 +9925,8 @@ def apply( result_type: Literal["expand", "reduce", "broadcast"] | None = None, args=(), by_row: Literal[False, "compat"] = "compat", + engine: Literal["python", "numba"] = "python", + engine_kwargs: dict[str, bool] | None = None, **kwargs, ): """ @@ -9984,6 +9986,35 @@ def apply( If False, the funcs will be passed the whole Series at once. .. versionadded:: 2.1.0 + + engine : {'python', 'numba'}, default 'python' + Choose between the python (default) engine or the numba engine in apply. + + The numba engine will attempt to JIT compile the passed function, + which may result in speedups for large DataFrames. + It also supports the following engine_kwargs : + + - nopython (compile the function in nopython mode) + - nogil (release the GIL inside the JIT compiled function) + - parallel (try to apply the function in parallel over the DataFrame) + + Note: The numba compiler only supports a subset of + valid Python/numpy operations. + + Please read more about the `supported python features + <https://numba.readthedocs.io/en/stable/reference/pysupported.html>`_ + and `supported numpy features + <https://numba.readthedocs.io/en/stable/reference/numpysupported.html>`_ + in numba to learn what you can or cannot use in the passed function. + + As of right now, the numba engine can only be used with raw=True. + + .. versionadded:: 2.2.0 + + engine_kwargs : dict + Pass keyword arguments to the engine. + This is currently only used by the numba engine, + see the documentation for the engine argument for more information. **kwargs Additional keyword arguments to pass as keyword arguments to `func`. 
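# --- Illustrative sketch (hedged example, not part of the patch above) ---
# How the ``engine="numba"`` path documented in this docstring is meant to
# be used: it only engages with raw=True, so the UDF receives plain ndarrays
# that numba can JIT compile. The frame below is made up, and the optional
# numba dependency must be installed.
import numpy as np

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})

# Per-column reduction, JIT-compiled on first call and cached afterwards
# via the @functools.cache on generate_apply_looper.
means = df.apply(np.mean, axis=0, raw=True, engine="numba")

# engine_kwargs maps to the numba.jit options (nopython/nogil/parallel).
doubled = df.apply(
    lambda x: x * 2, axis=1, raw=True, engine="numba", engine_kwargs={"nogil": True}
)
print(means, doubled, sep="\n")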
@@ -10084,6 +10115,8 @@ def apply( raw=raw, result_type=result_type, by_row=by_row, + engine=engine, + engine_kwargs=engine_kwargs, args=args, kwargs=kwargs, ) diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 3a3f73a68374b..3f2accc23e2d6 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -18,6 +18,13 @@ from pandas.tests.frame.common import zip_frames +@pytest.fixture(params=["python", "numba"]) +def engine(request): + if request.param == "numba": + pytest.importorskip("numba") + return request.param + + def test_apply(float_frame): with np.errstate(all="ignore"): # ufunc @@ -234,36 +241,42 @@ def test_apply_broadcast_series_lambda_func(int_frame_const_col): @pytest.mark.parametrize("axis", [0, 1]) -def test_apply_raw_float_frame(float_frame, axis): +def test_apply_raw_float_frame(float_frame, axis, engine): + if engine == "numba": + pytest.skip("numba can't handle when UDF returns None.") + def _assert_raw(x): assert isinstance(x, np.ndarray) assert x.ndim == 1 - float_frame.apply(_assert_raw, axis=axis, raw=True) + float_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True) @pytest.mark.parametrize("axis", [0, 1]) -def test_apply_raw_float_frame_lambda(float_frame, axis): - result = float_frame.apply(np.mean, axis=axis, raw=True) +def test_apply_raw_float_frame_lambda(float_frame, axis, engine): + result = float_frame.apply(np.mean, axis=axis, engine=engine, raw=True) expected = float_frame.apply(lambda x: x.values.mean(), axis=axis) tm.assert_series_equal(result, expected) -def test_apply_raw_float_frame_no_reduction(float_frame): +def test_apply_raw_float_frame_no_reduction(float_frame, engine): # no reduction - result = float_frame.apply(lambda x: x * 2, raw=True) + result = float_frame.apply(lambda x: x * 2, engine=engine, raw=True) expected = float_frame * 2 tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("axis", [0, 1]) -def test_apply_raw_mixed_type_frame(mixed_type_frame, axis): +def test_apply_raw_mixed_type_frame(mixed_type_frame, axis, engine): + if engine == "numba": + pytest.skip("isinstance check doesn't work with numba") + def _assert_raw(x): assert isinstance(x, np.ndarray) assert x.ndim == 1 # Mixed dtype (GH-32423) - mixed_type_frame.apply(_assert_raw, axis=axis, raw=True) + mixed_type_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True) def test_apply_axis1(float_frame): @@ -300,14 +313,20 @@ def test_apply_mixed_dtype_corner_indexing(): ) @pytest.mark.parametrize("raw", [True, False]) @pytest.mark.parametrize("axis", [0, 1]) -def test_apply_empty_infer_type(ax, func, raw, axis): +def test_apply_empty_infer_type(ax, func, raw, axis, engine, request): df = DataFrame(**{ax: ["a", "b", "c"]}) with np.errstate(all="ignore"): test_res = func(np.array([], dtype="f8")) is_reduction = not isinstance(test_res, np.ndarray) - result = df.apply(func, axis=axis, raw=raw) + if engine == "numba" and raw is False: + mark = pytest.mark.xfail( + reason="numba engine only supports raw=True at the moment" + ) + request.node.add_marker(mark) + + result = df.apply(func, axis=axis, engine=engine, raw=raw) if is_reduction: agg_axis = df._get_agg_axis(axis) assert isinstance(result, Series) @@ -607,8 +626,10 @@ def non_reducing_function(row): assert names == list(df.index) -def test_apply_raw_function_runs_once(): +def test_apply_raw_function_runs_once(engine): # https://github.com/pandas-dev/pandas/issues/34506 + if engine == "numba": + 
pytest.skip("appending to list outside of numba func is not supported") df = DataFrame({"a": [1, 2, 3]}) values = [] # Save row values function is applied to @@ -623,7 +644,7 @@ def non_reducing_function(row): for func in [reducing_function, non_reducing_function]: del values[:] - df.apply(func, raw=True, axis=1) + df.apply(func, engine=engine, raw=True, axis=1) assert values == list(df.a.to_list()) @@ -1449,10 +1470,12 @@ def test_apply_no_suffix_index(): tm.assert_frame_equal(result, expected) -def test_apply_raw_returns_string(): +def test_apply_raw_returns_string(engine): # https://github.com/pandas-dev/pandas/issues/35940 + if engine == "numba": + pytest.skip("No object dtype support in numba") df = DataFrame({"A": ["aa", "bbb"]}) - result = df.apply(lambda x: x[0], axis=1, raw=True) + result = df.apply(lambda x: x[0], engine=engine, axis=1, raw=True) expected = Series(["aa", "bbb"]) tm.assert_series_equal(result, expected) @@ -1632,3 +1655,14 @@ def test_agg_dist_like_and_nonunique_columns(): result = df.agg({"A": "count"}) expected = df["A"].count() tm.assert_series_equal(result, expected) + + +def test_numba_unsupported(): + df = DataFrame( + {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]} + ) + with pytest.raises( + ValueError, + match="The numba engine in DataFrame.apply can only be used when raw=True", + ): + df.apply(lambda x: x, engine="numba", raw=False) From 417a5e7fcfdc36385c0599f40bd1b0b8e96a3720 Mon Sep 17 00:00:00 2001 From: Rajat Subhra Mukherjee Date: Tue, 12 Sep 2023 00:46:01 +0530 Subject: [PATCH 52/93] Updated future warning msg in transform() for Series.groupby (#55082) * updated warn msg * Update apply.py --- pandas/core/apply.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 78d52ed262c7a..cc594bc8efb34 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1863,12 +1863,12 @@ def warn_alias_replacement( full_alias = alias else: full_alias = f"{type(obj).__name__}.{alias}" - alias = f"'{alias}'" + alias = f'"{alias}"' warnings.warn( f"The provided callable {func} is currently using " f"{full_alias}. In a future version of pandas, " f"the provided callable will be used directly. 
To keep current " - f"behavior pass {alias} instead.", + f"behavior pass the string {alias} instead.", category=FutureWarning, stacklevel=find_stack_level(), ) From 705d4312cf1d94ef2497bcd8091e0eabd1085f4a Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 11 Sep 2023 10:21:08 -1000 Subject: [PATCH 53/93] TST: Make test_hash_equality_invariance xfail more generic (#55094) --- pandas/tests/scalar/timedelta/test_timedelta.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index f1d8acf47b29a..cb797a4168088 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -927,7 +927,6 @@ def test_timedelta_hash_equality(self): @pytest.mark.xfail( reason="pd.Timedelta violates the Python hash invariant (GH#44504).", - raises=AssertionError, ) @given( st.integers( From 79067a76adc448d17210f2cf4a858b0eb853be4c Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Wed, 13 Sep 2023 02:34:56 +0600 Subject: [PATCH 54/93] ENH: add calamine excel reader (close #50395) (#54998) --- ci/deps/actions-310.yaml | 1 + ci/deps/actions-311-downstream_compat.yaml | 1 + ci/deps/actions-311.yaml | 1 + ci/deps/actions-39-minimum_versions.yaml | 1 + ci/deps/actions-39.yaml | 1 + ci/deps/circle-310-arm64.yaml | 1 + doc/source/getting_started/install.rst | 1 + doc/source/user_guide/io.rst | 23 +++- doc/source/whatsnew/v2.2.0.rst | 23 +++- environment.yml | 1 + pandas/compat/_optional.py | 2 + pandas/core/config_init.py | 10 +- pandas/io/excel/_base.py | 16 ++- pandas/io/excel/_calamine.py | 127 +++++++++++++++++ pandas/tests/io/excel/test_readers.py | 130 ++++++++++++------ pyproject.toml | 3 +- requirements-dev.txt | 1 + scripts/tests/data/deps_expected_random.yaml | 1 + scripts/tests/data/deps_minimum.toml | 3 +- .../tests/data/deps_unmodified_random.yaml | 1 + 20 files changed, 290 insertions(+), 58 deletions(-) create mode 100644 pandas/io/excel/_calamine.py diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 2190136220c6c..927003b13d6be 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -46,6 +46,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.5 - pytables>=3.7.0 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index cf85345cb0cc2..00df41cce3bae 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -47,6 +47,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.5 - pytables>=3.7.0 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 3c1630714a041..d50ea20da1e0c 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -46,6 +46,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.5 # - pytables>=3.7.0, 3.8.0 is first version that supports 3.11 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index b1cea49e22d15..10862630bd596 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -48,6 +48,7 @@ dependencies: - pymysql=1.0.2 - pyreadstat=1.1.5 - pytables=3.7.0 + - python-calamine=0.1.6 - pyxlsb=1.0.9 - s3fs=2022.05.0 - 
scipy=1.8.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index b8a119ece4b03..904b55a813a9f 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -46,6 +46,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.5 - pytables>=3.7.0 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index 71686837451b4..4060cea73e7f6 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -47,6 +47,7 @@ dependencies: - pymysql>=1.0.2 # - pyreadstat>=1.1.5 not available on ARM - pytables>=3.7.0 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index ae7c9d4ea9c62..2c0787397e047 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -281,6 +281,7 @@ xlrd 2.0.1 excel Reading Excel xlsxwriter 3.0.3 excel Writing Excel openpyxl 3.0.10 excel Reading / writing for xlsx files pyxlsb 1.0.9 excel Reading for xlsb files +python-calamine 0.1.6 excel Reading for xls/xlsx/xlsb/ods files ========================= ================== =============== ============================================================= HTML diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index ecd547c5ff4d6..6bd181740c78d 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3453,7 +3453,8 @@ Excel files The :func:`~pandas.read_excel` method can read Excel 2007+ (``.xlsx``) files using the ``openpyxl`` Python module. Excel 2003 (``.xls``) files can be read using ``xlrd``. Binary Excel (``.xlsb``) -files can be read using ``pyxlsb``. +files can be read using ``pyxlsb``. All formats can be read +using the :ref:`calamine<io.calamine>` engine. The :meth:`~DataFrame.to_excel` instance method is used for saving a ``DataFrame`` to Excel. Generally the semantics are similar to working with :ref:`csv` data. @@ -3494,6 +3495,9 @@ using internally. * For the engine odf, pandas is using :func:`odf.opendocument.load` to read in (``.ods``) files. +* For the engine calamine, pandas is using :func:`python_calamine.load_workbook` + to read in (``.xlsx``), (``.xlsm``), (``.xls``), (``.xlsb``), (``.ods``) files. + .. code-block:: python # Returns a DataFrame @@ -3935,7 +3939,8 @@ The :func:`~pandas.read_excel` method can also read binary Excel files using the ``pyxlsb`` module. The semantics and features for reading binary Excel files mostly match what can be done for `Excel files`_ using ``engine='pyxlsb'``. ``pyxlsb`` does not recognize datetime types -in files and will return floats instead. +in files and will return floats instead (you can use :ref:`calamine<io.calamine>` +if you need to recognize datetime types). .. code-block:: python @@ -3947,6 +3952,20 @@ in files and will return floats instead. Currently pandas only supports *reading* binary Excel files. Writing is not implemented. +.. _io.calamine: + +Calamine (Excel and ODS files) +------------------------------ + +The :func:`~pandas.read_excel` method can read Excel files (``.xlsx``, ``.xlsm``, ``.xls``, ``.xlsb``) +and OpenDocument spreadsheets (``.ods``) using the ``python-calamine`` module. +This module is a binding for the Rust library `calamine <https://crates.io/crates/calamine>`__ +and is faster than other engines in most cases. The optional dependency 'python-calamine' needs to be installed. + +.. code-block:: python + + # Returns a DataFrame + pd.read_excel("path_to_file.xlsb", engine="calamine") .. 
_io.clipboard: diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 07be496a95adc..249f08c7e387b 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -14,10 +14,27 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ -.. _whatsnew_220.enhancements.enhancement1: +.. _whatsnew_220.enhancements.calamine: -enhancement1 -^^^^^^^^^^^^ +Calamine engine for :func:`read_excel` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``calamine`` engine was added to :func:`read_excel`. +It uses ``python-calamine``, which provides Python bindings for the Rust library `calamine <https://crates.io/crates/calamine>`__. +This engine supports Excel files (``.xlsx``, ``.xlsm``, ``.xls``, ``.xlsb``) and OpenDocument spreadsheets (``.ods``) (:issue:`50395`). + +There are two advantages of this engine: + +1. Calamine is often faster than other engines; some benchmarks show results up to 5x faster than 'openpyxl', 20x faster than 'odf', 4x faster than 'pyxlsb', and 1.5x faster than 'xlrd'. + However, 'openpyxl' and 'pyxlsb' are faster at reading a few rows from large files because of lazy iteration over rows. +2. Calamine supports recognition of datetime types in ``.xlsb`` files, unlike 'pyxlsb' which is the only other engine in pandas that can read ``.xlsb`` files. + +.. code-block:: python + + pd.read_excel("path_to_file.xlsb", engine="calamine") + + +For more, see :ref:`io.calamine` in the user guide on IO tools. .. _whatsnew_220.enhancements.enhancement2: diff --git a/environment.yml b/environment.yml index 1a9dffb55bca7..1eb0b4cc2c7a6 100644 --- a/environment.yml +++ b/environment.yml @@ -47,6 +47,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.5 - pytables>=3.7.0 + - python-calamine>=0.1.6 - pyxlsb>=1.0.9 - s3fs>=2022.05.0 - scipy>=1.8.1 diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index c5792fa1379fe..fa0e9e974ea39 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -37,6 +37,7 @@ "pyarrow": "7.0.0", "pyreadstat": "1.1.5", "pytest": "7.3.2", + "python-calamine": "0.1.6", "pyxlsb": "1.0.9", "s3fs": "2022.05.0", "scipy": "1.8.1", @@ -62,6 +63,7 @@ "lxml.etree": "lxml", "odf": "odfpy", "pandas_gbq": "pandas-gbq", + "python_calamine": "python-calamine", "sqlalchemy": "SQLAlchemy", "tables": "pytables", } diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 62455f119a02f..750b374043193 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -513,11 +513,11 @@ def use_inf_as_na_cb(key) -> None: auto, {others}. """ -_xls_options = ["xlrd"] -_xlsm_options = ["xlrd", "openpyxl"] -_xlsx_options = ["xlrd", "openpyxl"] -_ods_options = ["odf"] -_xlsb_options = ["pyxlsb"] +_xls_options = ["xlrd", "calamine"] +_xlsm_options = ["xlrd", "openpyxl", "calamine"] +_xlsx_options = ["xlrd", "openpyxl", "calamine"] +_ods_options = ["odf", "calamine"] +_xlsb_options = ["pyxlsb", "calamine"] with cf.config_prefix("io.excel.xls"): diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index b4b0f29019c31..073115cab8695 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -159,13 +159,15 @@ of dtype conversion. engine : str, default None If io is not a buffer or path, this must be set to identify io. - Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb". + Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb", "calamine". Engine compatibility : - "xlrd" supports old-style Excel files (.xls). - "openpyxl" supports newer Excel file formats. - "odf" supports OpenDocument file formats (.odf, .ods, .odt). 
- "pyxlsb" supports Binary Excel files. + - "calamine" supports Excel (.xls, .xlsx, .xlsm, .xlsb) + and OpenDocument (.ods) file formats. .. versionchanged:: 1.2.0 The engine `xlrd `_ @@ -394,7 +396,7 @@ def read_excel( | Callable[[str], bool] | None = ..., dtype: DtypeArg | None = ..., - engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ..., converters: dict[str, Callable] | dict[int, Callable] | None = ..., true_values: Iterable[Hashable] | None = ..., false_values: Iterable[Hashable] | None = ..., @@ -433,7 +435,7 @@ def read_excel( | Callable[[str], bool] | None = ..., dtype: DtypeArg | None = ..., - engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ..., converters: dict[str, Callable] | dict[int, Callable] | None = ..., true_values: Iterable[Hashable] | None = ..., false_values: Iterable[Hashable] | None = ..., @@ -472,7 +474,7 @@ def read_excel( | Callable[[str], bool] | None = None, dtype: DtypeArg | None = None, - engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = None, + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None, converters: dict[str, Callable] | dict[int, Callable] | None = None, true_values: Iterable[Hashable] | None = None, false_values: Iterable[Hashable] | None = None, @@ -1456,13 +1458,15 @@ class ExcelFile: .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file. engine : str, default None If io is not a buffer or path, this must be set to identify io. - Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb`` + Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine`` Engine compatibility : - ``xlrd`` supports old-style Excel files (.xls). - ``openpyxl`` supports newer Excel file formats. - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). - ``pyxlsb`` supports Binary Excel files. + - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb) + and OpenDocument (.ods) file formats. .. versionchanged:: 1.2.0 @@ -1498,6 +1502,7 @@ class ExcelFile: ... 
df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP """ + from pandas.io.excel._calamine import CalamineReader from pandas.io.excel._odfreader import ODFReader from pandas.io.excel._openpyxl import OpenpyxlReader from pandas.io.excel._pyxlsb import PyxlsbReader @@ -1508,6 +1513,7 @@ class ExcelFile: "openpyxl": OpenpyxlReader, "odf": ODFReader, "pyxlsb": PyxlsbReader, + "calamine": CalamineReader, } def __init__( diff --git a/pandas/io/excel/_calamine.py b/pandas/io/excel/_calamine.py new file mode 100644 index 0000000000000..d61a9fc664164 --- /dev/null +++ b/pandas/io/excel/_calamine.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from typing import ( + TYPE_CHECKING, + Any, + Union, + cast, +) + +from pandas._typing import Scalar +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +import pandas as pd +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from python_calamine import ( + CalamineSheet, + CalamineWorkbook, + ) + + from pandas._typing import ( + FilePath, + ReadBuffer, + StorageOptions, + ) + +_CellValueT = Union[int, float, str, bool, time, date, datetime, timedelta] + + +class CalamineReader(BaseExcelReader["CalamineWorkbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using calamine engine (xlsx/xls/xlsb/ods). + + Parameters + ---------- + filepath_or_buffer : str, path to be parsed or + an open readable stream. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
+ """ + import_optional_dependency("python_calamine") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[CalamineWorkbook]: + from python_calamine import CalamineWorkbook + + return CalamineWorkbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any + ) -> CalamineWorkbook: + from python_calamine import load_workbook + + return load_workbook( + filepath_or_buffer, **engine_kwargs # type: ignore[arg-type] + ) + + @property + def sheet_names(self) -> list[str]: + from python_calamine import SheetTypeEnum + + return [ + sheet.name + for sheet in self.book.sheets_metadata + if sheet.typ == SheetTypeEnum.WorkSheet + ] + + def get_sheet_by_name(self, name: str) -> CalamineSheet: + self.raise_if_bad_sheet_by_name(name) + return self.book.get_sheet_by_name(name) + + def get_sheet_by_index(self, index: int) -> CalamineSheet: + self.raise_if_bad_sheet_by_index(index) + return self.book.get_sheet_by_index(index) + + def get_sheet_data( + self, sheet: CalamineSheet, file_rows_needed: int | None = None + ) -> list[list[Scalar]]: + def _convert_cell(value: _CellValueT) -> Scalar: + if isinstance(value, float): + val = int(value) + if val == value: + return val + else: + return value + elif isinstance(value, date): + return pd.Timestamp(value) + elif isinstance(value, timedelta): + return pd.Timedelta(value) + elif isinstance(value, time): + # cast needed here because Scalar doesn't include datetime.time + return cast(Scalar, value) + + return value + + rows: list[list[_CellValueT]] = sheet.to_python(skip_empty_area=False) + data: list[list[Scalar]] = [] + + for row in rows: + data.append([_convert_cell(cell) for cell in row]) + if file_rows_needed is not None and len(data) >= file_rows_needed: + break + + return data diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 6db70c894f692..de444019e7b4c 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -54,6 +54,7 @@ ), pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")), pytest.param("odf", marks=td.skip_if_no("odf")), + pytest.param("calamine", marks=td.skip_if_no("python_calamine")), ] @@ -67,11 +68,11 @@ def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool: return False if engine == "odf" and read_ext != ".ods": return False - if read_ext == ".ods" and engine != "odf": + if read_ext == ".ods" and engine not in {"odf", "calamine"}: return False if engine == "pyxlsb" and read_ext != ".xlsb": return False - if read_ext == ".xlsb" and engine != "pyxlsb": + if read_ext == ".xlsb" and engine not in {"pyxlsb", "calamine"}: return False if engine == "xlrd" and read_ext != ".xls": return False @@ -160,9 +161,9 @@ def test_engine_kwargs(self, read_ext, engine): "ods": {"foo": "abcd"}, } - if read_ext[1:] in {"xls", "xlsb"}: + if engine in {"xlrd", "pyxlsb"}: msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'") - elif read_ext[1:] == "ods": + elif engine == "odf": msg = re.escape(r"load() got an unexpected keyword argument 'foo'") else: msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'") @@ -194,8 +195,8 @@ def test_usecols_int(self, read_ext): usecols=3, ) - def test_usecols_list(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_usecols_list(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( 
pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -218,8 +219,8 @@ def test_usecols_list(self, request, read_ext, df_ref): tm.assert_frame_equal(df1, df_ref, check_names=False) tm.assert_frame_equal(df2, df_ref, check_names=False) - def test_usecols_str(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_usecols_str(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -275,9 +276,9 @@ def test_usecols_str(self, request, read_ext, df_ref): "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]] ) def test_usecols_diff_positional_int_columns_order( - self, request, read_ext, usecols, df_ref + self, request, engine, read_ext, usecols, df_ref ): - if read_ext == ".xlsb": + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -298,8 +299,8 @@ def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_r result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols) tm.assert_frame_equal(result, expected, check_names=False) - def test_read_excel_without_slicing(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -310,8 +311,8 @@ def test_read_excel_without_slicing(self, request, read_ext, df_ref): result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(result, expected, check_names=False) - def test_usecols_excel_range_str(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -398,20 +399,26 @@ def test_excel_stop_iterator(self, read_ext): expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"]) tm.assert_frame_equal(parsed, expected) - def test_excel_cell_error_na(self, request, read_ext): - if read_ext == ".xlsb": + def test_excel_cell_error_na(self, request, engine, read_ext): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" ) ) + # https://github.com/tafia/calamine/issues/355 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail(reason="Calamine can't extract error from ods files") + ) + parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1") expected = DataFrame([[np.nan]], columns=["Test"]) tm.assert_frame_equal(parsed, expected) - def test_excel_table(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_excel_table(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -431,8 +438,8 @@ def test_excel_table(self, request, read_ext, df_ref): ) tm.assert_frame_equal(df3, df1.iloc[:-1]) - def test_reader_special_dtypes(self, request, read_ext): - if read_ext == ".xlsb": + def test_reader_special_dtypes(self, request, engine, read_ext): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing 
datetimes not supported by pyxlsb" @@ -571,11 +578,17 @@ def test_reader_dtype_str(self, read_ext, dtype, expected): actual = pd.read_excel(basename + read_ext, dtype=dtype) tm.assert_frame_equal(actual, expected) - def test_dtype_backend(self, read_ext, dtype_backend): + def test_dtype_backend(self, request, engine, read_ext, dtype_backend): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") + # GH 54994 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail(reason="OdsWriter produces broken file") + ) + df = DataFrame( { "a": Series([1, 3], dtype="Int64"), @@ -616,11 +629,17 @@ def test_dtype_backend(self, read_ext, dtype_backend): expected = df tm.assert_frame_equal(result, expected) - def test_dtype_backend_and_dtype(self, read_ext): + def test_dtype_backend_and_dtype(self, request, engine, read_ext): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") + # GH 54994 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail(reason="OdsWriter produces broken file") + ) + df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]}) with tm.ensure_clean(read_ext) as file_path: df.to_excel(file_path, sheet_name="test", index=False) @@ -632,11 +651,17 @@ def test_dtype_backend_and_dtype(self, read_ext): ) tm.assert_frame_equal(result, df) - def test_dtype_backend_string(self, read_ext, string_storage): + def test_dtype_backend_string(self, request, engine, read_ext, string_storage): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") + # GH 54994 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail(reason="OdsWriter produces broken file") + ) + pa = pytest.importorskip("pyarrow") with pd.option_context("mode.string_storage", string_storage): @@ -800,8 +825,8 @@ def test_date_conversion_overflow(self, request, engine, read_ext): result = pd.read_excel("testdateoverflow" + read_ext) tm.assert_frame_equal(result, expected) - def test_sheet_name(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_sheet_name(self, request, read_ext, engine, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -869,6 +894,11 @@ def test_corrupt_bytes_raises(self, engine): "Unsupported format, or corrupt file: Expected BOF " "record; found b'foo'" ) + elif engine == "calamine": + from python_calamine import CalamineError + + error = CalamineError + msg = "Cannot detect file format" else: error = BadZipFile msg = "File is not a zip file" @@ -969,6 +999,14 @@ def test_reader_seconds(self, request, engine, read_ext): ) ) + # GH 55045 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail( + reason="ODS file contains bad datetime (seconds as text)" + ) + ) + # Test reading times with and without milliseconds. GH5945. 
expected = DataFrame.from_dict( { @@ -994,15 +1032,21 @@ def test_reader_seconds(self, request, engine, read_ext): actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) - def test_read_excel_multiindex(self, request, read_ext): + def test_read_excel_multiindex(self, request, engine, read_ext): # see gh-4679 - if read_ext == ".xlsb": + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" ) ) + # https://github.com/tafia/calamine/issues/354 + if engine == "calamine" and read_ext == ".ods": + request.node.add_marker( + pytest.mark.xfail(reason="Last test fails in calamine") + ) + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) mi_file = "testmultiindex" + read_ext @@ -1088,10 +1132,10 @@ def test_read_excel_multiindex(self, request, read_ext): ], ) def test_read_excel_multiindex_blank_after_name( - self, request, read_ext, sheet_name, idx_lvl2 + self, request, engine, read_ext, sheet_name, idx_lvl2 ): # GH34673 - if read_ext == ".xlsb": + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb (GH4679" @@ -1212,9 +1256,9 @@ def test_read_excel_bool_header_arg(self, read_ext): with pytest.raises(TypeError, match=msg): pd.read_excel("test1" + read_ext, header=arg) - def test_read_excel_skiprows(self, request, read_ext): + def test_read_excel_skiprows(self, request, engine, read_ext): # GH 4903 - if read_ext == ".xlsb": + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -1267,9 +1311,9 @@ def test_read_excel_skiprows(self, request, read_ext): ) tm.assert_frame_equal(actual, expected) - def test_read_excel_skiprows_callable_not_in(self, request, read_ext): + def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext): # GH 4903 - if read_ext == ".xlsb": + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -1397,7 +1441,7 @@ def test_trailing_blanks(self, read_ext): def test_ignore_chartsheets_by_str(self, request, engine, read_ext): # GH 41448 - if engine == "odf": + if read_ext == ".ods": pytest.skip("chartsheets do not exist in the ODF format") if engine == "pyxlsb": request.node.add_marker( @@ -1410,7 +1454,7 @@ def test_ignore_chartsheets_by_str(self, request, engine, read_ext): def test_ignore_chartsheets_by_int(self, request, engine, read_ext): # GH 41448 - if engine == "odf": + if read_ext == ".ods": pytest.skip("chartsheets do not exist in the ODF format") if engine == "pyxlsb": request.node.add_marker( @@ -1540,8 +1584,8 @@ def test_excel_passes_na_filter(self, read_ext, na_filter): expected = DataFrame(expected, columns=["Test"]) tm.assert_frame_equal(parsed, expected) - def test_excel_table_sheet_by_index(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -1569,8 +1613,8 @@ def test_excel_table_sheet_by_index(self, request, read_ext, df_ref): tm.assert_frame_equal(df3, df1.iloc[:-1]) - def test_sheet_name(self, request, read_ext, df_ref): - if read_ext == ".xlsb": + def test_sheet_name(self, request, engine, read_ext, df_ref): + if engine == "pyxlsb": request.node.add_marker( 
pytest.mark.xfail( reason="Sheets containing datetimes not supported by pyxlsb" @@ -1639,7 +1683,7 @@ def test_excel_read_binary(self, engine, read_ext): def test_excel_read_binary_via_read_excel(self, read_ext, engine): # GH 38424 with open("test1" + read_ext, "rb") as f: - result = pd.read_excel(f) + result = pd.read_excel(f, engine=engine) expected = pd.read_excel("test1" + read_ext, engine=engine) tm.assert_frame_equal(result, expected) @@ -1691,7 +1735,7 @@ def test_engine_invalid_option(self, read_ext): def test_ignore_chartsheets(self, request, engine, read_ext): # GH 41448 - if engine == "odf": + if read_ext == ".ods": pytest.skip("chartsheets do not exist in the ODF format") if engine == "pyxlsb": request.node.add_marker( @@ -1711,6 +1755,10 @@ def test_corrupt_files_closed(self, engine, read_ext): import xlrd errors = (BadZipFile, xlrd.biffh.XLRDError) + elif engine == "calamine": + from python_calamine import CalamineError + + errors = (CalamineError,) with tm.ensure_clean(f"corrupt{read_ext}") as file: Path(file).write_text("corrupt", encoding="utf-8") diff --git a/pyproject.toml b/pyproject.toml index 74d6aaee286a9..9e579036c128b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,7 @@ computation = ['scipy>=1.8.1', 'xarray>=2022.03.0'] fss = ['fsspec>=2022.05.0'] aws = ['s3fs>=2022.05.0'] gcp = ['gcsfs>=2022.05.0', 'pandas-gbq>=0.17.5'] -excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.10', 'pyxlsb>=1.0.9', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.3'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.10', 'python-calamine>=0.1.6', 'pyxlsb>=1.0.9', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.3'] parquet = ['pyarrow>=7.0.0'] feather = ['pyarrow>=7.0.0'] hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) @@ -112,6 +112,7 @@ all = ['beautifulsoup4>=4.11.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0', + 'python-calamine>=0.1.6', 'pyxlsb>=1.0.9', 'qtpy>=2.2.0', 'scipy>=1.8.1', diff --git a/requirements-dev.txt b/requirements-dev.txt index be02007a36333..ef3587b10d416 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -36,6 +36,7 @@ pyarrow>=7.0.0 pymysql>=1.0.2 pyreadstat>=1.1.5 tables>=3.7.0 +python-calamine>=0.1.6 pyxlsb>=1.0.9 s3fs>=2022.05.0 scipy>=1.8.1 diff --git a/scripts/tests/data/deps_expected_random.yaml b/scripts/tests/data/deps_expected_random.yaml index c70025f8f019d..1ede20f5cc0d8 100644 --- a/scripts/tests/data/deps_expected_random.yaml +++ b/scripts/tests/data/deps_expected_random.yaml @@ -44,6 +44,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.2 - pytables>=3.6.1 + - python-calamine>=0.1.6 - pyxlsb>=1.0.8 - s3fs>=2021.08.0 - scipy>=1.7.1 diff --git a/scripts/tests/data/deps_minimum.toml b/scripts/tests/data/deps_minimum.toml index b43815a982139..501ec4f061f17 100644 --- a/scripts/tests/data/deps_minimum.toml +++ b/scripts/tests/data/deps_minimum.toml @@ -62,7 +62,7 @@ computation = ['scipy>=1.7.1', 'xarray>=0.21.0'] fss = ['fsspec>=2021.07.0'] aws = ['s3fs>=2021.08.0'] gcp = ['gcsfs>=2021.07.0', 'pandas-gbq>=0.15.0'] -excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.7', 'pyxlsb>=1.0.8', 'xlrd>=2.0.1', 'xlsxwriter>=1.4.3'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.7', 'python-calamine>=0.1.6', 'pyxlsb>=1.0.8', 'xlrd>=2.0.1', 'xlsxwriter>=1.4.3'] parquet = ['pyarrow>=7.0.0'] feather = ['pyarrow>=7.0.0'] hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) @@ -103,6 +103,7 @@ all = ['beautifulsoup4>=5.9.3', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0', 
+ 'python-calamine>=0.1.6', 'pyxlsb>=1.0.8', 'qtpy>=2.2.0', 'scipy>=1.7.1', diff --git a/scripts/tests/data/deps_unmodified_random.yaml b/scripts/tests/data/deps_unmodified_random.yaml index 503eb3c7c7734..14bedd1025bf8 100644 --- a/scripts/tests/data/deps_unmodified_random.yaml +++ b/scripts/tests/data/deps_unmodified_random.yaml @@ -44,6 +44,7 @@ dependencies: - pymysql>=1.0.2 - pyreadstat>=1.1.2 - pytables>=3.6.1 + - python-calamine>=0.1.6 - pyxlsb>=1.0.8 - s3fs>=2021.08.0 - scipy>=1.7.1 From 0bdbc44babac09225bdde02b642252ce054723e3 Mon Sep 17 00:00:00 2001 From: Luke Manley Date: Wed, 13 Sep 2023 13:00:54 -0400 Subject: [PATCH 55/93] PERF: Index.difference (#55108) * PERF: Index.difference * whatsnew * remove is_monotonic check --- doc/source/whatsnew/v2.2.0.rst | 1 + pandas/core/indexes/base.py | 17 +++-------------- pandas/tests/indexes/datetimes/test_setops.py | 4 +++- pandas/tests/indexes/timedeltas/test_setops.py | 4 +++- 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 249f08c7e387b..1b8864809975f 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -178,6 +178,7 @@ Performance improvements - Performance improvement in :func:`concat` with ``axis=1`` and objects with unaligned indexes (:issue:`55084`) - Performance improvement in :func:`to_dict` on converting DataFrame to dictionary (:issue:`50990`) - Performance improvement in :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` when indexed by a :class:`MultiIndex` (:issue:`54835`) +- Performance improvement in :meth:`Index.difference` (:issue:`55108`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cd55997ad5f69..8756bb3f3c81b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3615,21 +3615,10 @@ def difference(self, other, sort=None): def _difference(self, other, sort): # overridden by RangeIndex - - this = self.unique() - - indexer = this.get_indexer_for(other) - indexer = indexer.take((indexer != -1).nonzero()[0]) - - label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) - - the_diff: MultiIndex | ArrayLike - if isinstance(this, ABCMultiIndex): - the_diff = this.take(label_diff) - else: - the_diff = this._values.take(label_diff) + other = other.unique() + the_diff = self[other.get_indexer_for(self) == -1] + the_diff = the_diff if self.is_unique else the_diff.unique() the_diff = _maybe_try_sort(the_diff, sort) - return the_diff def _wrap_difference_result(self, other, result): diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 2e7b38abf4212..b56bad7f2e833 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -343,9 +343,11 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) + # preserve frequency when the difference is a contiguous + # subset of the original range other = date_range("20160922", "20160925", freq="D") idx_diff = index.difference(other, sort) - expected = DatetimeIndex(["20160920", "20160921"], freq=None) + expected = DatetimeIndex(["20160920", "20160921"], freq="D") tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) diff --git a/pandas/tests/indexes/timedeltas/test_setops.py 
b/pandas/tests/indexes/timedeltas/test_setops.py index cb6dce1e7ad80..6cdd6944e90ea 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -219,9 +219,11 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) + # preserve frequency when the difference is a contiguous + # subset of the original range other = timedelta_range("2 days", "5 days", freq="D") idx_diff = index.difference(other, sort) - expected = TimedeltaIndex(["0 days", "1 days"], freq=None) + expected = TimedeltaIndex(["0 days", "1 days"], freq="D") tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) From 67b1e8b1f1fbf98c8e4e10473e6ac691d515593e Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 13 Sep 2023 07:03:48 -1000 Subject: [PATCH 56/93] DOC: Remove deprecated attributes in DatetimeIndex (#55093) --- pandas/core/indexes/datetimes.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index dcb5f8caccd3e..400747cbf6b8d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -198,8 +198,6 @@ class DatetimeIndex(DatetimeTimedeltaMixin): timetz dayofyear day_of_year - weekofyear - week dayofweek day_of_week weekday From 310f8a8a31dd88a55641ce742c7d13a2a8b0e238 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 13 Sep 2023 07:10:10 -1000 Subject: [PATCH 57/93] BUG: dt.tz with ArrowDtype returned string (#55072) --- doc/source/whatsnew/v2.1.1.rst | 1 + pandas/core/arrays/arrow/array.py | 3 ++- pandas/tests/extension/test_arrow.py | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst index 42af61be26355..6d5da7cdff3b3 100644 --- a/doc/source/whatsnew/v2.1.1.rst +++ b/doc/source/whatsnew/v2.1.1.rst @@ -35,6 +35,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug for :class:`ArrowDtype` raising ``NotImplementedError`` for fixed-size list (:issue:`55000`) - Fixed bug in :meth:`DataFrame.stack` with ``future_stack=True`` and columns a non-:class:`MultiIndex` consisting of tuples (:issue:`54948`) +- Fixed bug in :meth:`Series.dt.tz` with :class:`ArrowDtype` where a string was returned instead of a ``tzinfo`` object (:issue:`55003`) - Fixed bug in :meth:`Series.pct_change` and :meth:`DataFrame.pct_change` showing unnecessary ``FutureWarning`` (:issue:`54981`) .. 
---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 83ed54c42a23c..2b2e0c843564f 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -18,6 +18,7 @@
 from pandas._libs.tslibs import (
     Timedelta,
     Timestamp,
+    timezones,
 )
 from pandas.compat import (
     pa_version_under7p0,
@@ -2425,7 +2426,7 @@ def _dt_time(self):
 
     @property
     def _dt_tz(self):
-        return self.dtype.pyarrow_dtype.tz
+        return timezones.maybe_get_tz(self.dtype.pyarrow_dtype.tz)
 
     @property
     def _dt_unit(self):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 2e98eea3cac8a..8968b9a7f25fe 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -31,6 +31,7 @@
 import pytest
 
 from pandas._libs import lib
+from pandas._libs.tslibs import timezones
 from pandas.compat import (
     PY311,
     is_ci_environment,
@@ -2432,7 +2433,7 @@ def test_dt_tz(tz):
         dtype=ArrowDtype(pa.timestamp("ns", tz=tz)),
     )
     result = ser.dt.tz
-    assert result == tz
+    assert result == timezones.maybe_get_tz(tz)
 
 
 def test_dt_isocalendar():

From 4e28925751491581a8bf92531714204f2a68dcde Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20=C5=A0=C3=ADcho?=
Date: Wed, 13 Sep 2023 19:14:59 +0200
Subject: [PATCH 58/93] BUG: This fixes #55009 (`raw=True` caused `apply`
 method of `DataFrame` to ignore passed arguments) (#55089)

* fixes #55009

* update documentation

* write documentation

* add test

* change formatting

* cite DataFrame directly in docs

Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 doc/source/whatsnew/v2.2.0.rst         | 1 +
 pandas/core/apply.py                   | 6 +++++-
 pandas/tests/apply/test_frame_apply.py | 5 +++--
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 1b8864809975f..117df65f983af 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -189,6 +189,7 @@ Bug fixes
 ~~~~~~~~~
 - Bug in :class:`AbstractHolidayCalendar` where timezone data was not propagated when computing holiday observances (:issue:`54580`)
 - Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`)
+- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
 
 Categorical
 ^^^^^^^^^^^
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index cc594bc8efb34..9748d4fe66739 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -955,7 +955,11 @@ def wrapper(*args, **kwargs):
                 result = np.squeeze(result)
         else:
             result = np.apply_along_axis(
-                wrap_function(self.func), self.axis, self.values
+                wrap_function(self.func),
+                self.axis,
+                self.values,
+                *self.args,
+                **self.kwargs,
             )
 
         # TODO: mixed type case
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 3f2accc23e2d6..227b72573f979 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -45,8 +45,9 @@ def test_apply(float_frame):
 
 @pytest.mark.parametrize("axis", [0, 1])
-def test_apply_args(float_frame, axis):
-    result = float_frame.apply(lambda x, y: x + y, axis, args=(1,))
+@pytest.mark.parametrize("raw", [True, False])
+def test_apply_args(float_frame, axis, raw):
+    result = float_frame.apply(lambda x, y: x + y, axis, args=(1,), raw=raw)
     expected = float_frame + 1
     tm.assert_frame_equal(result, expected)
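The regression fixed above is easy to reproduce with public API: before this patch, ``args``/``kwargs`` were silently dropped whenever ``raw=True``. A minimal sketch of the now-expected behavior (illustrative only, not part of the patch):

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})

# With the fix, extra arguments reach the function in both modes;
# raw=True passes NumPy arrays instead of Series to the callable.
with_raw = df.apply(lambda x, y: x + y, axis=0, raw=True, args=(1,))
without_raw = df.apply(lambda x, y: x + y, axis=0, raw=False, args=(1,))
pd.testing.assert_frame_equal(with_raw, without_raw)  # both add 1 elementwise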
From 13b132e7d154cee2b6daf3133a283d745fee4def Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Thu, 14 Sep 2023 02:05:08 +0600
Subject: [PATCH 59/93] BUG: boolean/string value in OdsWriter (#54994)
 (#54996)

---
 doc/source/whatsnew/v2.2.0.rst          |  1 +
 pandas/io/excel/_odswriter.py           | 27 ++++++++++----
 pandas/tests/io/excel/test_odswriter.py | 49 +++++++++++++++++++++++++
 pandas/tests/io/excel/test_readers.py   | 24 ++----------
 4 files changed, 72 insertions(+), 29 deletions(-)

diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 117df65f983af..54e855f61905a 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -250,6 +250,7 @@ I/O
 ^^^
 - Bug in :func:`read_csv` where ``on_bad_lines="warn"`` would write to ``stderr`` instead of raise a Python warning. This now yields a :class:`.errors.ParserWarning` (:issue:`54296`)
 - Bug in :func:`read_excel`, with ``engine="xlrd"`` (``xls`` files) erroring when file contains NaNs/Infs (:issue:`54564`)
+- Bug in :func:`to_excel`, with ``OdsWriter`` (``ods`` files) when writing boolean/string values (:issue:`54994`)
 
 Period
 ^^^^^^
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index 74cbe90acdae8..bc7dca2d95b6b 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -192,7 +192,15 @@ def _make_table_cell(self, cell) -> tuple[object, Any]:
         if isinstance(val, bool):
             value = str(val).lower()
             pvalue = str(val).upper()
-        if isinstance(val, datetime.datetime):
+            return (
+                pvalue,
+                TableCell(
+                    valuetype="boolean",
+                    booleanvalue=value,
+                    attributes=attributes,
+                ),
+            )
+        elif isinstance(val, datetime.datetime):
             # Fast formatting
             value = val.isoformat()
             # Slow but locale-dependent
@@ -210,17 +218,20 @@ def _make_table_cell(self, cell) -> tuple[object, Any]:
                 pvalue,
                 TableCell(valuetype="date", datevalue=value, attributes=attributes),
             )
+        elif isinstance(val, str):
+            return (
+                pvalue,
+                TableCell(
+                    valuetype="string",
+                    stringvalue=value,
+                    attributes=attributes,
+                ),
+            )
         else:
-            class_to_cell_type = {
-                str: "string",
-                int: "float",
-                float: "float",
-                bool: "boolean",
-            }
             return (
                 pvalue,
                 TableCell(
-                    valuetype=class_to_cell_type[type(val)],
+                    valuetype="float",
                     value=value,
                     attributes=attributes,
                 ),
diff --git a/pandas/tests/io/excel/test_odswriter.py b/pandas/tests/io/excel/test_odswriter.py
index 21d31ec8a7fb5..ecee58362f8a9 100644
--- a/pandas/tests/io/excel/test_odswriter.py
+++ b/pandas/tests/io/excel/test_odswriter.py
@@ -1,7 +1,12 @@
+from datetime import (
+    date,
+    datetime,
+)
 import re
 
 import pytest
 
+import pandas as pd
 import pandas._testing as tm
 
 from pandas.io.excel import ExcelWriter
@@ -47,3 +52,47 @@ def test_book_and_sheets_consistent(ext):
         table = odf.table.Table(name="test_name")
         writer.book.spreadsheet.addElement(table)
         assert writer.sheets == {"test_name": table}
+
+
+@pytest.mark.parametrize(
+    ["value", "cell_value_type", "cell_value_attribute", "cell_value"],
+    argvalues=[
+        (True, "boolean", "boolean-value", "true"),
+        ("test string", "string", "string-value", "test string"),
+        (1, "float", "value", "1"),
+        (1.5, "float", "value", "1.5"),
+        (
+            datetime(2010,
10, 10, 10, 10, 10), + "date", + "date-value", + "2010-10-10T10:10:10", + ), + (date(2010, 10, 10), "date", "date-value", "2010-10-10"), + ], +) +def test_cell_value_type(ext, value, cell_value_type, cell_value_attribute, cell_value): + # GH#54994 ODS: cell attributes should follow specification + # http://docs.oasis-open.org/office/v1.2/os/OpenDocument-v1.2-os-part1.html#refTable13 + from odf.namespaces import OFFICENS + from odf.table import ( + TableCell, + TableRow, + ) + + table_cell_name = TableCell().qname + + with tm.ensure_clean(ext) as f: + pd.DataFrame([[value]]).to_excel(f, header=False, index=False) + + with pd.ExcelFile(f) as wb: + sheet = wb._reader.get_sheet_by_index(0) + sheet_rows = sheet.getElementsByType(TableRow) + sheet_cells = [ + x + for x in sheet_rows[0].childNodes + if hasattr(x, "qname") and x.qname == table_cell_name + ] + + cell = sheet_cells[0] + assert cell.attributes.get((OFFICENS, "value-type")) == cell_value_type + assert cell.attributes.get((OFFICENS, cell_value_attribute)) == cell_value diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index de444019e7b4c..8dd9f96a05a90 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -578,17 +578,11 @@ def test_reader_dtype_str(self, read_ext, dtype, expected): actual = pd.read_excel(basename + read_ext, dtype=dtype) tm.assert_frame_equal(actual, expected) - def test_dtype_backend(self, request, engine, read_ext, dtype_backend): + def test_dtype_backend(self, read_ext, dtype_backend): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") - # GH 54994 - if engine == "calamine" and read_ext == ".ods": - request.node.add_marker( - pytest.mark.xfail(reason="OdsWriter produces broken file") - ) - df = DataFrame( { "a": Series([1, 3], dtype="Int64"), @@ -629,17 +623,11 @@ def test_dtype_backend(self, request, engine, read_ext, dtype_backend): expected = df tm.assert_frame_equal(result, expected) - def test_dtype_backend_and_dtype(self, request, engine, read_ext): + def test_dtype_backend_and_dtype(self, read_ext): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") - # GH 54994 - if engine == "calamine" and read_ext == ".ods": - request.node.add_marker( - pytest.mark.xfail(reason="OdsWriter produces broken file") - ) - df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]}) with tm.ensure_clean(read_ext) as file_path: df.to_excel(file_path, sheet_name="test", index=False) @@ -651,17 +639,11 @@ def test_dtype_backend_and_dtype(self, request, engine, read_ext): ) tm.assert_frame_equal(result, df) - def test_dtype_backend_string(self, request, engine, read_ext, string_storage): + def test_dtype_backend_string(self, read_ext, string_storage): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") - # GH 54994 - if engine == "calamine" and read_ext == ".ods": - request.node.add_marker( - pytest.mark.xfail(reason="OdsWriter produces broken file") - ) - pa = pytest.importorskip("pyarrow") with pd.option_context("mode.string_storage", string_storage): From 51c2300210533a27fbd8bb58f93c2f382bbbdc40 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Wed, 13 Sep 2023 19:36:04 -0400 Subject: [PATCH 60/93] Use pandasSQL transactions in sql test suite to avoid engine deadlocks (#55129) pandasSQL use transactions in test suite --- pandas/tests/io/test_sql.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 
deletions(-)

diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index bbdb22955297e..1abe0ad55a864 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1141,18 +1141,21 @@ def load_types_data(self, types_data):
     def _read_sql_iris_parameter(self, sql_strings):
         query = sql_strings["read_parameters"][self.flavor]
         params = ("Iris-setosa", 5.1)
-        iris_frame = self.pandasSQL.read_query(query, params=params)
+        with self.pandasSQL.run_transaction():
+            iris_frame = self.pandasSQL.read_query(query, params=params)
         check_iris_frame(iris_frame)
 
     def _read_sql_iris_named_parameter(self, sql_strings):
         query = sql_strings["read_named_parameters"][self.flavor]
         params = {"name": "Iris-setosa", "length": 5.1}
-        iris_frame = self.pandasSQL.read_query(query, params=params)
+        with self.pandasSQL.run_transaction():
+            iris_frame = self.pandasSQL.read_query(query, params=params)
         check_iris_frame(iris_frame)
 
     def _read_sql_iris_no_parameter_with_percent(self, sql_strings):
         query = sql_strings["read_no_parameters_with_percent"][self.flavor]
-        iris_frame = self.pandasSQL.read_query(query, params=None)
+        with self.pandasSQL.run_transaction():
+            iris_frame = self.pandasSQL.read_query(query, params=None)
         check_iris_frame(iris_frame)
 
     def _to_sql_empty(self, test_frame1):
@@ -1182,7 +1185,8 @@ def _to_sql_with_sql_engine(self, test_frame1, engine="auto", **engine_kwargs):
     def _roundtrip(self, test_frame1):
         self.drop_table("test_frame_roundtrip", self.conn)
         assert self.pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4
-        result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
+        with self.pandasSQL.run_transaction():
+            result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
 
         result.set_index("level_0", inplace=True)
         # result.index.astype(int)
@@ -1232,13 +1236,14 @@ class DummyException(Exception):
         except DummyException:
             # ignore raised exception
             pass
-        res = self.pandasSQL.read_query("SELECT * FROM test_trans")
+        with self.pandasSQL.run_transaction():
+            res = self.pandasSQL.read_query("SELECT * FROM test_trans")
         assert len(res) == 0
 
         # Make sure when transaction is committed, rows do get inserted
         with self.pandasSQL.run_transaction() as trans:
             trans.execute(ins_sql)
-        res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
+            res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
         assert len(res2) == 1

From 81fb7e76073ffe6adb875f15cdcfbac52c15b339 Mon Sep 17 00:00:00 2001
From: Fangchen Li
Date: Wed, 13 Sep 2023 16:37:46 -0700
Subject: [PATCH 61/93] DEPS: remove duplicated dependency in
 requirements-dev.txt (#55101)

* Test installing dev dependencies with pip

* fix typo

* remove 3.12, list deps

* remove pip ci test

---
 environment.yml      | 2 +-
 requirements-dev.txt | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/environment.yml b/environment.yml
index 1eb0b4cc2c7a6..8deae839f5408 100644
--- a/environment.yml
+++ b/environment.yml
@@ -106,7 +106,7 @@ dependencies:
   - ipykernel
 
   # web
-  - jinja2  # in optional dependencies, but documented here as needed
+  # - jinja2  # already listed in optional dependencies, but documented here for reference
   - markdown
   - feedparser
   - pyyaml
diff --git a/requirements-dev.txt b/requirements-dev.txt
index ef3587b10d416..01e0701bc39a7 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -77,7 +77,6 @@ ipywidgets
 nbformat
 notebook>=6.0.3
 ipykernel
-jinja2
 markdown
 feedparser
 pyyaml

From f00efd0344bd4e22cc867e76c776cb88669e6cde Mon Sep 17 00:00:00 2001
From: William Ayd
Date: Wed, 13
Sep 2023 19:39:07 -0400 Subject: [PATCH 62/93] Assorted UBSAN cleanups (#55112) * first round of fixes * fix up includes * updates * dedup logic * move comment --- .../src/vendored/ujson/lib/ultrajsonenc.c | 8 ++- pandas/_libs/tslibs/np_datetime.pyx | 49 +++++++++++++------ 2 files changed, 41 insertions(+), 16 deletions(-) diff --git a/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c index e3e710ce1b876..942bd0b518144 100644 --- a/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c @@ -44,6 +44,7 @@ Numeric decoder derived from TCL library #include #include #include +#include #include #include #include @@ -763,7 +764,12 @@ void Buffer_AppendIntUnchecked(JSONObjectEncoder *enc, JSINT32 value) { void Buffer_AppendLongUnchecked(JSONObjectEncoder *enc, JSINT64 value) { char *wstr; - JSUINT64 uvalue = (value < 0) ? -value : value; + JSUINT64 uvalue; + if (value == INT64_MIN) { + uvalue = INT64_MAX + UINT64_C(1); + } else { + uvalue = (value < 0) ? -value : value; + } wstr = enc->offset; // Conversion. Number is reversed. diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 7b2ee68c73ad2..c3ee68e14a8d4 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -1,4 +1,3 @@ -cimport cython from cpython.datetime cimport ( PyDateTime_CheckExact, PyDateTime_DATE_GET_HOUR, @@ -18,6 +17,7 @@ from cpython.object cimport ( Py_LT, Py_NE, ) +from libc.stdint cimport INT64_MAX import_datetime() PandasDateTime_IMPORT @@ -545,7 +545,6 @@ cdef ndarray astype_round_check( return iresult -@cython.overflowcheck(True) cdef int64_t get_conversion_factor( NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit @@ -553,6 +552,7 @@ cdef int64_t get_conversion_factor( """ Find the factor by which we need to multiply to convert from from_unit to to_unit. 
""" + cdef int64_t value, overflow_limit, factor if ( from_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC or to_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC @@ -565,28 +565,44 @@ cdef int64_t get_conversion_factor( return 1 if from_unit == NPY_DATETIMEUNIT.NPY_FR_W: - return 7 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit) + factor = 7 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_D: - return 24 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit) + factor = 24 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_h: - return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit) + factor = 60 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_m: - return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit) + factor = 60 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_s: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit) + factor = 1000 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ms: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit) + factor = 1000 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_us: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit) + factor = 1000 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ns: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit) + factor = 1000 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ps: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit) + factor = 1000 elif from_unit == NPY_DATETIMEUNIT.NPY_FR_fs: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit) + value = get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit) + factor = 1000 else: raise ValueError("Converting from M or Y units is not supported.") + overflow_limit = INT64_MAX // factor + if value > overflow_limit or value < -overflow_limit: + raise OverflowError("result would overflow") + + return factor * value + cdef int64_t convert_reso( int64_t value, @@ -595,7 +611,7 @@ cdef int64_t convert_reso( bint round_ok, ) except? -1: cdef: - int64_t res_value, mult, div, mod + int64_t res_value, mult, div, mod, overflow_limit if from_reso == to_reso: return value @@ -624,9 +640,12 @@ cdef int64_t convert_reso( else: # e.g. 
ns -> us, risk of overflow, but no risk of lossy rounding mult = get_conversion_factor(from_reso, to_reso) - with cython.overflowcheck(True): + overflow_limit = INT64_MAX // mult + if value > overflow_limit or value < -overflow_limit: # Note: caller is responsible for re-raising as OutOfBoundsTimedelta - res_value = value * mult + raise OverflowError("result would overflow") + + res_value = value * mult return res_value From 7134f2c14e614980bcf366f979a0f85aafacbde6 Mon Sep 17 00:00:00 2001 From: Patrick Hoefler <61934744+phofl@users.noreply.github.com> Date: Fri, 15 Sep 2023 11:10:52 +0200 Subject: [PATCH 63/93] CoW: Clear dead references every time we add a new one (#55008) --- pandas/_libs/internals.pyx | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 7a9a3b84fd69f..3b1a6bc7436c3 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -897,6 +897,11 @@ cdef class BlockValuesRefs: else: self.referenced_blocks = [] + def _clear_dead_references(self) -> None: + self.referenced_blocks = [ + ref for ref in self.referenced_blocks if ref() is not None + ] + def add_reference(self, blk: Block) -> None: """Adds a new reference to our reference collection. @@ -905,6 +910,7 @@ cdef class BlockValuesRefs: blk : Block The block that the new references should point to. """ + self._clear_dead_references() self.referenced_blocks.append(weakref.ref(blk)) def add_index_reference(self, index: object) -> None: @@ -915,6 +921,7 @@ cdef class BlockValuesRefs: index : Index The index that the new reference should point to. """ + self._clear_dead_references() self.referenced_blocks.append(weakref.ref(index)) def has_reference(self) -> bool: @@ -927,8 +934,6 @@ cdef class BlockValuesRefs: ------- bool """ - self.referenced_blocks = [ - ref for ref in self.referenced_blocks if ref() is not None - ] + self._clear_dead_references() # Checking for more references than block pointing to itself return len(self.referenced_blocks) > 1 From cc58350c92a9e24bef586cd1b898c01640b33abc Mon Sep 17 00:00:00 2001 From: Luke Manley Date: Fri, 15 Sep 2023 16:06:24 -0400 Subject: [PATCH 64/93] PERF: groupby aggregations on pyarrow timestamp and duration types (#55131) * PERF: groupby aggregations on pyarrow timestamp and duration types * mypy * update --- doc/source/whatsnew/v2.2.0.rst | 1 + pandas/core/arrays/arrow/array.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 54e855f61905a..03b69b53836ad 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -177,6 +177,7 @@ Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Performance improvement in :func:`concat` with ``axis=1`` and objects with unaligned indexes (:issue:`55084`) - Performance improvement in :func:`to_dict` on converting DataFrame to dictionary (:issue:`50990`) +- Performance improvement in :meth:`DataFrame.groupby` when aggregating pyarrow timestamp and duration dtypes (:issue:`55031`) - Performance improvement in :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` when indexed by a :class:`MultiIndex` (:issue:`54835`) - Performance improvement in :meth:`Index.difference` (:issue:`55108`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 2b2e0c843564f..a329c37c77449 100644 --- 
a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1993,9 +1993,17 @@ def _groupby_op( **kwargs, ) - masked = self._to_masked() + # maybe convert to a compatible dtype optimized for groupby + values: ExtensionArray + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type): + values = self._to_datetimearray() + elif pa.types.is_duration(pa_type): + values = self._to_timedeltaarray() + else: + values = self._to_masked() - result = masked._groupby_op( + result = values._groupby_op( how=how, has_dropped_na=has_dropped_na, min_count=min_count, From 85062f0e09590b7da3edfe1cabd427f224180b9e Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Fri, 15 Sep 2023 22:03:21 -0400 Subject: [PATCH 65/93] CI: Unpin Cython (#54784) * CI: Unpin Cython * Update actions-311-numpydev.yaml * disable cache for now * try bumping meson * bump meson everywhere --------- Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- .github/workflows/unit-tests.yml | 12 ++++++------ ci/deps/actions-310.yaml | 4 ++-- ci/deps/actions-311-downstream_compat.yaml | 4 ++-- ci/deps/actions-311-numpydev.yaml | 4 ++-- ci/deps/actions-311-pyarrownightly.yaml | 4 ++-- ci/deps/actions-311.yaml | 4 ++-- ci/deps/actions-39-minimum_versions.yaml | 4 ++-- ci/deps/actions-39.yaml | 4 ++-- ci/deps/actions-pypy-39.yaml | 4 ++-- ci/deps/circle-310-arm64.yaml | 4 ++-- environment.yml | 2 +- meson.build | 2 +- pyproject.toml | 2 +- requirements-dev.txt | 2 +- 14 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index f2b426269098b..98c6226b14075 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -233,8 +233,8 @@ jobs: run: | /opt/python/cp311-cp311/bin/python -m venv ~/virtualenvs/pandas-dev . ~/virtualenvs/pandas-dev/bin/activate - python -m pip install --no-cache-dir -U pip wheel setuptools meson[ninja]==1.0.1 meson-python==0.13.1 - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.1" numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir -U pip wheel setuptools meson[ninja]==1.2.1 meson-python==0.13.1 + python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir export PANDAS_CI=1 @@ -271,8 +271,8 @@ jobs: run: | /opt/python/cp311-cp311/bin/python -m venv ~/virtualenvs/pandas-dev . ~/virtualenvs/pandas-dev/bin/activate - python -m pip install --no-cache-dir -U pip wheel setuptools meson-python==0.13.1 meson[ninja]==1.0.1 - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.1" numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir -U pip wheel setuptools meson-python==0.13.1 meson[ninja]==1.2.1 + python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . 
python -m pip list --no-cache-dir @@ -342,10 +342,10 @@ jobs: - name: Build Environment run: | python --version - python -m pip install --upgrade pip setuptools wheel meson[ninja]==1.0.1 meson-python==0.13.1 + python -m pip install --upgrade pip setuptools wheel meson[ninja]==1.2.1 meson-python==0.13.1 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy python -m pip install versioneer[toml] - python -m pip install python-dateutil pytz tzdata "cython<3.0.1" hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 + python -m pip install python-dateutil pytz tzdata cython hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 python -m pip install -ve . --no-build-isolation --no-index python -m pip list diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 927003b13d6be..682708d811878 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -6,8 +6,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index 00df41cce3bae..d09d4e5dea648 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -7,8 +7,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml index acf94fce5288b..7fd3a65ec91f8 100644 --- a/ci/deps/actions-311-numpydev.yaml +++ b/ci/deps/actions-311-numpydev.yaml @@ -6,8 +6,9 @@ dependencies: # build dependencies - versioneer[toml] - - meson[ninja]=1.0.1 + - meson[ninja]=1.2.1 - meson-python=0.13.1 + - cython>=0.29.33 # test dependencies - pytest>=7.3.2 @@ -25,7 +26,6 @@ dependencies: - pip - pip: - - "cython<3.0.1" - "--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" - "--pre" - "numpy" diff --git a/ci/deps/actions-311-pyarrownightly.yaml b/ci/deps/actions-311-pyarrownightly.yaml index e850f69f945c5..893341350f4ef 100644 --- a/ci/deps/actions-311-pyarrownightly.yaml +++ b/ci/deps/actions-311-pyarrownightly.yaml @@ -6,8 +6,8 @@ dependencies: # build dependencies - versioneer[toml] - - meson[ninja]=1.0.1 - - cython>=0.29.33, <3.0.1 + - meson[ninja]=1.2.1 + - cython>=0.29.33 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index d50ea20da1e0c..f458546fc3a1b 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -6,8 +6,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index 10862630bd596..b7cc6e9e891ce 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -8,8 +8,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 
904b55a813a9f..4ee8dc0c6d3fc 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -6,8 +6,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-pypy-39.yaml b/ci/deps/actions-pypy-39.yaml index 23c3706f43dad..4923c94ab08f3 100644 --- a/ci/deps/actions-pypy-39.yaml +++ b/ci/deps/actions-pypy-39.yaml @@ -9,8 +9,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index 4060cea73e7f6..c520fa17551e0 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -6,8 +6,8 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.1 - - meson[ninja]=1.0.1 + - cython>=0.29.33 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/environment.yml b/environment.yml index 8deae839f5408..8103ee29c84a7 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: # build dependencies - versioneer[toml] - cython=0.29.33 - - meson[ninja]=1.0.1 + - meson[ninja]=1.2.1 - meson-python=0.13.1 # test dependencies diff --git a/meson.build b/meson.build index e0e533ffade97..68018046c081f 100644 --- a/meson.build +++ b/meson.build @@ -4,7 +4,7 @@ project( 'c', 'cpp', 'cython', version: run_command(['generate_version.py', '--print'], check: true).stdout().strip(), license: 'BSD-3', - meson_version: '>=1.0.1', + meson_version: '>=1.2.1', default_options: [ 'buildtype=release', 'c_std=c99' diff --git a/pyproject.toml b/pyproject.toml index 9e579036c128b..7807a6cc6368d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ # See https://github.com/scipy/scipy/pull/12940 for the AIX issue. 
requires = [
     "meson-python==0.13.1",
-    "meson==1.0.1",
+    "meson==1.2.1",
     "wheel",
     "Cython>=0.29.33,<3",  # Note: sync with setup.py, environment.yml and asv.conf.json
     # Note: numpy 1.25 has a backwards compatible C API by default
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 01e0701bc39a7..77afa9005029c 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -4,7 +4,7 @@ pip
 versioneer[toml]
 cython==0.29.33
-meson[ninja]==1.0.1
+meson[ninja]==1.2.1
 meson-python==0.13.1
 pytest>=7.3.2
 pytest-cov

From 7b4df5a0b7a58566c673bd32e7280ce74a95fcb9 Mon Sep 17 00:00:00 2001
From: Joris Van den Bossche
Date: Sat, 16 Sep 2023 15:46:33 +0200
Subject: [PATCH 66/93] DOCS: temporary pin pydata-sphinx-theme to 0.13
 (#55159)

---
 environment.yml      | 2 +-
 requirements-dev.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/environment.yml b/environment.yml
index 8103ee29c84a7..5caa57ef37ee8 100644
--- a/environment.yml
+++ b/environment.yml
@@ -85,7 +85,7 @@ dependencies:
   - google-auth
   - natsort  # DataFrame.sort_values doctest
   - numpydoc
-  - pydata-sphinx-theme
+  - pydata-sphinx-theme=0.13
   - pytest-cython  # doctest
   - sphinx
   - sphinx-design
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 77afa9005029c..b8e7e376378c6 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -60,7 +60,7 @@ gitdb
 google-auth
 natsort
 numpydoc
-pydata-sphinx-theme
+pydata-sphinx-theme==0.13
 pytest-cython
 sphinx
 sphinx-design

From 52bed6dfa9ba99df21fba2740da2a5a2832ef96c Mon Sep 17 00:00:00 2001
From: Deepak George
Date: Sun, 17 Sep 2023 14:45:25 +0200
Subject: [PATCH 67/93] DOC: fix see also links in pandas.DataFrame.agg
 docstring (#55162)

---
 pandas/core/frame.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8fcb91c846826..124c21839afe7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9829,11 +9829,11 @@ def _gotitem(
     --------
     DataFrame.apply : Perform any type of operations.
     DataFrame.transform : Perform transformation type operations.
-    pandas.core.groupby.GroupBy : Perform operations over groups.
-    pandas.core.resample.Resampler : Perform operations over resampled bins.
-    pandas.core.window.Rolling : Perform operations over rolling window.
-    pandas.core.window.Expanding : Perform operations over expanding window.
-    pandas.core.window.ExponentialMovingWindow : Perform operation over exponential
+    pandas.DataFrame.groupby : Perform operations over groups.
+    pandas.DataFrame.resample : Perform operations over resampled bins.
+    pandas.DataFrame.rolling : Perform operations over rolling window.
+    pandas.DataFrame.expanding : Perform operations over expanding window.
+    pandas.core.window.ewm.ExponentialMovingWindow : Perform operation over exponential
         weighted window.
     """
 )
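The patch above repoints the ``agg`` see-also links, and the next one trims the duplicated return-type sentence from the same docstrings. The contract that sentence describes can be checked with public API alone (a minimal sketch, not part of either patch):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# DataFrame.agg with a single function -> Series
assert isinstance(df.agg("sum"), pd.Series)
# DataFrame.agg with several functions -> DataFrame
assert isinstance(df.agg(["sum", "min"]), pd.DataFrame)
# Series.agg with a single function -> scalar
assert df["a"].agg("sum") == 3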
From f4f598fb36c0809da01cade2d5d832ee09564101 Mon Sep 17 00:00:00 2001
From: Elahe Sharifi
Date: Sun, 17 Sep 2023 14:48:09 +0200
Subject: [PATCH 68/93] DOC: remove duplicated sentence in return section of
 pandas.DataFrame.agg/asof docstrings (#55165)

---
 pandas/core/generic.py     | 2 --
 pandas/core/shared_docs.py | 2 --
 2 files changed, 4 deletions(-)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5c303e2a73bd7..bbaa6d3faeb05 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8323,8 +8323,6 @@ def asof(self, where, subset=None):
             * DataFrame : when `self` is a DataFrame and `where` is an array-like
 
-        Return scalar, Series, or DataFrame.
-
         See Also
         --------
         merge_asof : Perform an asof merge. Similar to left join.
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 9da103e13f691..ec219941a3afc 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -34,8 +34,6 @@
     * scalar : when Series.agg is called with single function
     * Series : when DataFrame.agg is called with a single function
     * DataFrame : when DataFrame.agg is called with several functions
-
-    Return scalar, Series or DataFrame.
 {see_also}
 Notes
 -----

From 36aa531b0f0e31cac32e56633ab90eb4b3fccda8 Mon Sep 17 00:00:00 2001
From: Tim Swast
Date: Mon, 18 Sep 2023 16:15:09 +0000
Subject: [PATCH 69/93] ENH: add Series.struct accessor for ArrowDtype[struct]
 (#54977)

Features:

* Series.struct.dtypes -- see dtypes and field names
* Series.struct.field(name_or_index) -- extract a field as a Series
* Series.struct.explode() -- convert all fields into a DataFrame
---
 doc/source/reference/series.rst               |  23 ++
 doc/source/whatsnew/v2.2.0.rst                |  28 +++
 pandas/core/arrays/arrow/__init__.py          |   3 +-
 pandas/core/arrays/arrow/accessors.py         | 196 ++++++++++++++++++
 pandas/core/series.py                         |   2 +
 .../series/accessors/test_struct_accessor.py  | 147 +++++++++++++
 6 files changed, 398 insertions(+), 1 deletion(-)
 create mode 100644 pandas/core/arrays/arrow/accessors.py
 create mode 100644 pandas/tests/series/accessors/test_struct_accessor.py

diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 58351bab07b22..9acbab7a42800 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -525,6 +525,29 @@ Sparse-dtype specific methods and attributes are provided under the
    Series.sparse.from_coo
    Series.sparse.to_coo
 
+
+.. _api.series.struct:
+
+Struct accessor
+~~~~~~~~~~~~~~~
+
+Arrow struct-dtype specific methods and attributes are provided under the
+``Series.struct`` accessor.
+
+.. autosummary::
+   :toctree: api/
+   :template: autosummary/accessor_attribute.rst
+
+   Series.struct.dtypes
+
+.. autosummary::
+   :toctree: api/
+   :template: autosummary/accessor_method.rst
+
+   Series.struct.field
+   Series.struct.explode
+
+
 .. _api.series.flags:
 
 Flags
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 03b69b53836ad..9e55399afe5af 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -36,6 +36,34 @@ There are two advantages of this engine:
 
 For more, see :ref:`io.calamine` in the user guide on IO tools.
 
+.. _whatsnew_220.enhancements.struct_accessor:
+
+Series.struct accessor for PyArrow structured data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``Series.struct`` accessor provides attributes and methods for processing
+data with ``struct[pyarrow]`` dtype Series. For example,
+:meth:`Series.struct.explode` converts PyArrow structured data to a pandas
+DataFrame. (:issue:`54938`)
+
+.. ipython:: python
+
+    import pyarrow as pa
+    series = pd.Series(
+        [
+            {"project": "pandas", "version": "2.2.0"},
+            {"project": "numpy", "version": "1.25.2"},
+            {"project": "pyarrow", "version": "13.0.0"},
+        ],
+        dtype=pd.ArrowDtype(
+            pa.struct([
+                ("project", pa.string()),
+                ("version", pa.string()),
+            ])
+        ),
+    )
+    series.struct.explode()
+
 ..
_whatsnew_220.enhancements.enhancement2: enhancement2 diff --git a/pandas/core/arrays/arrow/__init__.py b/pandas/core/arrays/arrow/__init__.py index 58b268cbdd221..a3d33f91f597d 100644 --- a/pandas/core/arrays/arrow/__init__.py +++ b/pandas/core/arrays/arrow/__init__.py @@ -1,3 +1,4 @@ +from pandas.core.arrays.arrow.accessors import StructAccessor from pandas.core.arrays.arrow.array import ArrowExtensionArray -__all__ = ["ArrowExtensionArray"] +__all__ = ["ArrowExtensionArray", "StructAccessor"] diff --git a/pandas/core/arrays/arrow/accessors.py b/pandas/core/arrays/arrow/accessors.py new file mode 100644 index 0000000000000..e4ed255476e8e --- /dev/null +++ b/pandas/core/arrays/arrow/accessors.py @@ -0,0 +1,196 @@ +"""Accessors for arrow-backed data.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.compat import pa_version_under7p0 + +if not pa_version_under7p0: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.dtypes.dtypes import ArrowDtype + +if TYPE_CHECKING: + from pandas import ( + DataFrame, + Series, + ) + + +class StructAccessor: + """ + Accessor object for structured data properties of the Series values. + + Parameters + ---------- + data : Series + Series containing Arrow struct data. + """ + + _validation_msg = ( + "Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, not {dtype}." + ) + + def __init__(self, data=None) -> None: + self._parent = data + self._validate(data) + + def _validate(self, data): + dtype = data.dtype + if not isinstance(dtype, ArrowDtype): + # Raise AttributeError so that inspect can handle non-struct Series. + raise AttributeError(self._validation_msg.format(dtype=dtype)) + + if not pa.types.is_struct(dtype.pyarrow_dtype): + # Raise AttributeError so that inspect can handle non-struct Series. + raise AttributeError(self._validation_msg.format(dtype=dtype)) + + @property + def dtypes(self) -> Series: + """ + Return the dtype object of each child field of the struct. + + Returns + ------- + pandas.Series + The data type of each child field. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... ) + >>> s.struct.dtypes + version int64[pyarrow] + project string[pyarrow] + dtype: object + """ + from pandas import ( + Index, + Series, + ) + + pa_type = self._parent.dtype.pyarrow_dtype + types = [ArrowDtype(struct.type) for struct in pa_type] + names = [struct.name for struct in pa_type] + return Series(types, index=Index(names)) + + def field(self, name_or_index: str | int) -> Series: + """ + Extract a child field of a struct as a Series. + + Parameters + ---------- + name_or_index : str | int + Name or index of the child field to extract. + + Returns + ------- + pandas.Series + The data corresponding to the selected child field. + + See Also + -------- + Series.struct.explode : Return all child fields as a DataFrame. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... ) + + Extract by field name. 
+ + >>> s.struct.field("project") + 0 pandas + 1 pandas + 2 numpy + Name: project, dtype: string[pyarrow] + + Extract by field index. + + >>> s.struct.field(0) + 0 1 + 1 2 + 2 1 + Name: version, dtype: int64[pyarrow] + """ + from pandas import Series + + pa_arr = self._parent.array._pa_array + if isinstance(name_or_index, int): + index = name_or_index + elif isinstance(name_or_index, str): + index = pa_arr.type.get_field_index(name_or_index) + else: + raise ValueError( + "name_or_index must be an int or str, " + f"got {type(name_or_index).__name__}" + ) + + pa_field = pa_arr.type[index] + field_arr = pc.struct_field(pa_arr, [index]) + return Series( + field_arr, + dtype=ArrowDtype(field_arr.type), + index=self._parent.index, + name=pa_field.name, + ) + + def explode(self) -> DataFrame: + """ + Extract all child fields of a struct as a DataFrame. + + Returns + ------- + pandas.DataFrame + The data corresponding to all child fields. + + See Also + -------- + Series.struct.field : Return a single child field as a Series. + + Examples + -------- + >>> import pyarrow as pa + >>> s = pd.Series( + ... [ + ... {"version": 1, "project": "pandas"}, + ... {"version": 2, "project": "pandas"}, + ... {"version": 1, "project": "numpy"}, + ... ], + ... dtype=pd.ArrowDtype(pa.struct( + ... [("version", pa.int64()), ("project", pa.string())] + ... )) + ... ) + + >>> s.struct.explode() + version project + 0 1 pandas + 1 2 pandas + 2 1 numpy + """ + from pandas import concat + + pa_type = self._parent.dtype.pyarrow_dtype + return concat( + [self.field(i) for i in range(pa_type.num_fields)], axis="columns" + ) diff --git a/pandas/core/series.py b/pandas/core/series.py index 9b5c8829fd5ff..e0e27581ef7e2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -101,6 +101,7 @@ from pandas.core.accessor import CachedAccessor from pandas.core.apply import SeriesApply from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.arrow import StructAccessor from pandas.core.arrays.categorical import CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor from pandas.core.construction import ( @@ -5787,6 +5788,7 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series cat = CachedAccessor("cat", CategoricalAccessor) plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) sparse = CachedAccessor("sparse", SparseAccessor) + struct = CachedAccessor("struct", StructAccessor) # ---------------------------------------------------------------------- # Add plotting methods to Series diff --git a/pandas/tests/series/accessors/test_struct_accessor.py b/pandas/tests/series/accessors/test_struct_accessor.py new file mode 100644 index 0000000000000..c645bb6807052 --- /dev/null +++ b/pandas/tests/series/accessors/test_struct_accessor.py @@ -0,0 +1,147 @@ +import re + +import pytest + +from pandas import ( + ArrowDtype, + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.core.arrays.arrow.accessors import StructAccessor + +pa = pytest.importorskip("pyarrow") + + +def test_struct_accessor_dtypes(): + ser = Series( + [], + dtype=ArrowDtype( + pa.struct( + [ + ("int_col", pa.int64()), + ("string_col", pa.string()), + ( + "struct_col", + pa.struct( + [ + ("int_col", pa.int64()), + ("float_col", pa.float64()), + ] + ), + ), + ] + ) + ), + ) + actual = ser.struct.dtypes + expected = Series( + [ + ArrowDtype(pa.int64()), + ArrowDtype(pa.string()), + ArrowDtype( + pa.struct( + [ + ("int_col", pa.int64()), + ("float_col", pa.float64()), + ] + ) + ), 
+ ], + index=Index(["int_col", "string_col", "struct_col"]), + ) + tm.assert_series_equal(actual, expected) + + +def test_struct_accessor_field(): + index = Index([-100, 42, 123]) + ser = Series( + [ + {"rice": 1.0, "maize": -1, "wheat": "a"}, + {"rice": 2.0, "maize": 0, "wheat": "b"}, + {"rice": 3.0, "maize": 1, "wheat": "c"}, + ], + dtype=ArrowDtype( + pa.struct( + [ + ("rice", pa.float64()), + ("maize", pa.int64()), + ("wheat", pa.string()), + ] + ) + ), + index=index, + ) + by_name = ser.struct.field("maize") + by_name_expected = Series( + [-1, 0, 1], + dtype=ArrowDtype(pa.int64()), + index=index, + name="maize", + ) + tm.assert_series_equal(by_name, by_name_expected) + + by_index = ser.struct.field(2) + by_index_expected = Series( + ["a", "b", "c"], + dtype=ArrowDtype(pa.string()), + index=index, + name="wheat", + ) + tm.assert_series_equal(by_index, by_index_expected) + + +def test_struct_accessor_field_with_invalid_name_or_index(): + ser = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())]))) + + with pytest.raises(ValueError, match="name_or_index must be an int or str"): + ser.struct.field(1.1) + + +def test_struct_accessor_explode(): + index = Index([-100, 42, 123]) + ser = Series( + [ + {"painted": 1, "snapping": {"sea": "green"}}, + {"painted": 2, "snapping": {"sea": "leatherback"}}, + {"painted": 3, "snapping": {"sea": "hawksbill"}}, + ], + dtype=ArrowDtype( + pa.struct( + [ + ("painted", pa.int64()), + ("snapping", pa.struct([("sea", pa.string())])), + ] + ) + ), + index=index, + ) + actual = ser.struct.explode() + expected = DataFrame( + { + "painted": Series([1, 2, 3], index=index, dtype=ArrowDtype(pa.int64())), + "snapping": Series( + [{"sea": "green"}, {"sea": "leatherback"}, {"sea": "hawksbill"}], + index=index, + dtype=ArrowDtype(pa.struct([("sea", pa.string())])), + ), + }, + ) + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.parametrize( + "invalid", + [ + pytest.param(Series([1, 2, 3], dtype="int64"), id="int64"), + pytest.param( + Series(["a", "b", "c"], dtype="string[pyarrow]"), id="string-pyarrow" + ), + ], +) +def test_struct_accessor_api_for_invalid(invalid): + msg = re.escape(StructAccessor._validation_msg.format(dtype=invalid.dtype)) + + with pytest.raises(AttributeError, match=msg): + invalid.struct From 6a2598052dd214bfb84ac8d0f6ecbb7586546cc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:17:33 +0200 Subject: [PATCH 70/93] MAINT: Cleanup expired ndarray methods (#55180) --- doc/source/user_guide/gotchas.rst | 2 +- pandas/io/stata.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst index c00a236ff4e9d..99c85ac66623d 100644 --- a/doc/source/user_guide/gotchas.rst +++ b/doc/source/user_guide/gotchas.rst @@ -379,7 +379,7 @@ constructors using something similar to the following: .. 
ipython:: python x = np.array(list(range(10)), ">i4") # big endian - newx = x.byteswap().newbyteorder() # force native byteorder + newx = x.byteswap().view(x.dtype.newbyteorder()) # force native byteorder s = pd.Series(newx) See `the NumPy documentation on byte order diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7f19e62f40774..a6d17a604a23f 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1773,7 +1773,7 @@ def read( self._data_read = True # if necessary, swap the byte order to native here if self._byteorder != self._native_byteorder: - raw_data = raw_data.byteswap().newbyteorder() + raw_data = raw_data.byteswap().view(raw_data.dtype.newbyteorder()) if convert_categoricals: self._read_value_labels() From 8768cea987ae16e1e2c150d4acc2581ffff5421f Mon Sep 17 00:00:00 2001 From: jfadia <90651438+jfadia@users.noreply.github.com> Date: Mon, 18 Sep 2023 09:24:43 -0700 Subject: [PATCH 71/93] DOC: Update read_csv docs to reflect behaviour with `parse_dates` (#55176) * Update read_csv docs * Fix line length --- pandas/io/parsers/readers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index e826aad478059..acf35ebd6afe5 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -238,7 +238,8 @@ default False The behavior is as follows: - * ``bool``. If ``True`` -> try parsing the index. + * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to + ``True`` if ``date_format`` or ``date_parser`` arguments have been passed. * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3 each as a separate date column. * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse From 699a8e40a8257bba02b7b5bfa2218f272bdf1743 Mon Sep 17 00:00:00 2001 From: Moritz Schubert <35040095+mcnoat@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:26:38 +0200 Subject: [PATCH 72/93] DOC: complete parameter descriptions for DataFrame.insert's docstring (#55172) complete parameter descriptions for DataFrame.insert's docstring issue: https://github.com/jorisvandenbossche/pydata-amsterdam-pandas-sprint/issues/1 - bullet point item #5: "complete the parameter descriptions" --- pandas/core/frame.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 124c21839afe7..4e87e90278e7b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4915,7 +4915,9 @@ def insert( column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like + Content of the inserted column. allow_duplicates : bool, optional, default lib.no_default + Allow duplicate column labels to be created. 
See Also -------- From 2b8decdccea5a70dc92ad58283edecf8b1b049bb Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 18 Sep 2023 09:32:27 -0700 Subject: [PATCH 73/93] CLN: de-duplicate code in _matplotlib.converter (#55155) * CLN: de-duplicate _matplotlib.converters * REF: avoid changing type of vmin/vmax * CLN: de-duplicate * REF: de-duplicate, annotate * REF: standardize * comment * mypy fixup --- pandas/_libs/tslibs/dtypes.pyx | 1 - pandas/plotting/_matplotlib/converter.py | 202 +++++++++++------------ pandas/tests/plotting/test_converter.py | 4 +- 3 files changed, 104 insertions(+), 103 deletions(-) diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index bafde9f3b237b..f0f73d242cdf0 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -460,7 +460,6 @@ cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) noexcept nogil: return NPY_DATETIMEUNIT.NPY_FR_D -# TODO: use in _matplotlib.converter? cpdef int64_t periods_per_day( NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns ) except? -1: diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 33aeaa6d81406..3f77a32014bff 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -11,7 +11,6 @@ from typing import ( TYPE_CHECKING, Any, - Final, cast, ) @@ -30,8 +29,14 @@ Timestamp, to_offset, ) -from pandas._libs.tslibs.dtypes import FreqGroup -from pandas._typing import F +from pandas._libs.tslibs.dtypes import ( + FreqGroup, + periods_per_day, +) +from pandas._typing import ( + F, + npt, +) from pandas.core.dtypes.common import ( is_float, @@ -60,15 +65,6 @@ from pandas._libs.tslibs.offsets import BaseOffset -# constants -HOURS_PER_DAY: Final = 24.0 -MIN_PER_HOUR: Final = 60.0 -SEC_PER_MIN: Final = 60.0 - -SEC_PER_HOUR: Final = SEC_PER_MIN * MIN_PER_HOUR -SEC_PER_DAY: Final = SEC_PER_HOUR * HOURS_PER_DAY - -MUSEC_PER_DAY: Final = 10**6 * SEC_PER_DAY _mpl_units = {} # Cache for units overwritten by us @@ -495,7 +491,7 @@ def _get_default_annual_spacing(nyears) -> tuple[int, int]: return (min_spacing, maj_spacing) -def period_break(dates: PeriodIndex, period: str) -> np.ndarray: +def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]: """ Returns the indices where the given period changes. @@ -506,12 +502,17 @@ def period_break(dates: PeriodIndex, period: str) -> np.ndarray: period : str Name of the period to monitor. """ + mask = _period_break_mask(dates, period) + return np.nonzero(mask)[0] + + +def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]: current = getattr(dates, period) previous = getattr(dates - 1 * dates.freq, period) - return np.nonzero(current - previous)[0] + return current != previous -def has_level_label(label_flags: np.ndarray, vmin: float) -> bool: +def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool: """ Returns true if the ``label_flags`` indicate there is at least one label for this level. 
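As a quick illustration of the refactor in this hunk (a minimal sketch with a toy index, not part of the patch): `_period_break_mask` returns the boolean change mask, and `_period_break` merely wraps it in `np.nonzero`, so the two helpers stay consistent:

    import numpy as np
    import pandas as pd

    dates_ = pd.period_range("2020-01-30", periods=5, freq="D")

    # True where the month changes relative to the previous period
    current = dates_.month
    previous = (dates_ - 1 * dates_.freq).month
    mask = current != previous         # what _period_break_mask("month") computes

    indices = np.nonzero(mask)[0]      # what _period_break("month") returns: array([2])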
@@ -527,54 +528,59 @@ def has_level_label(label_flags: np.ndarray, vmin: float) -> bool:
     return True
 
 
-def _daily_finder(vmin, vmax, freq: BaseOffset):
+def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]:
     # error: "BaseOffset" has no attribute "_period_dtype_code"
     dtype_code = freq._period_dtype_code  # type: ignore[attr-defined]
     freq_group = FreqGroup.from_period_dtype_code(dtype_code)
 
-    periodsperday = -1
+    ppd = -1  # placeholder for above-day freqs
 
     if dtype_code >= FreqGroup.FR_HR.value:
-        if freq_group == FreqGroup.FR_NS:
-            periodsperday = 24 * 60 * 60 * 1000000000
-        elif freq_group == FreqGroup.FR_US:
-            periodsperday = 24 * 60 * 60 * 1000000
-        elif freq_group == FreqGroup.FR_MS:
-            periodsperday = 24 * 60 * 60 * 1000
-        elif freq_group == FreqGroup.FR_SEC:
-            periodsperday = 24 * 60 * 60
-        elif freq_group == FreqGroup.FR_MIN:
-            periodsperday = 24 * 60
-        elif freq_group == FreqGroup.FR_HR:
-            periodsperday = 24
-        else:  # pragma: no cover
-            raise ValueError(f"unexpected frequency: {dtype_code}")
-        periodsperyear = 365 * periodsperday
-        periodspermonth = 28 * periodsperday
-
+        # error: "BaseOffset" has no attribute "_creso"
+        ppd = periods_per_day(freq._creso)  # type: ignore[attr-defined]
+        ppm = 28 * ppd
+        ppy = 365 * ppd
     elif freq_group == FreqGroup.FR_BUS:
-        periodsperyear = 261
-        periodspermonth = 19
+        ppm = 19
+        ppy = 261
     elif freq_group == FreqGroup.FR_DAY:
-        periodsperyear = 365
-        periodspermonth = 28
+        ppm = 28
+        ppy = 365
     elif freq_group == FreqGroup.FR_WK:
-        periodsperyear = 52
-        periodspermonth = 3
-    else:  # pragma: no cover
-        raise ValueError("unexpected frequency")
+        ppm = 3
+        ppy = 52
+    elif freq_group == FreqGroup.FR_MTH:
+        ppm = 1
+        ppy = 12
+    elif freq_group == FreqGroup.FR_QTR:
+        ppm = -1  # placeholder
+        ppy = 4
+    elif freq_group == FreqGroup.FR_ANN:
+        ppm = -1  # placeholder
+        ppy = 1
+    else:
+        raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
+
+    return ppd, ppm, ppy
+
+
+def _daily_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray:
+    # error: "BaseOffset" has no attribute "_period_dtype_code"
+    dtype_code = freq._period_dtype_code  # type: ignore[attr-defined]
+
+    periodsperday, periodspermonth, periodsperyear = _get_periods_per_ymd(freq)
 
     # save this for later usage
     vmin_orig = vmin
+    (vmin, vmax) = (int(vmin), int(vmax))
+    span = vmax - vmin + 1
 
-    (vmin, vmax) = (
-        Period(ordinal=int(vmin), freq=freq),
-        Period(ordinal=int(vmax), freq=freq),
+    dates_ = period_range(
+        start=Period(ordinal=vmin, freq=freq),
+        end=Period(ordinal=vmax, freq=freq),
+        freq=freq,
     )
-    assert isinstance(vmin, Period)
-    assert isinstance(vmax, Period)
-    span = vmax.ordinal - vmin.ordinal + 1
-    dates_ = period_range(start=vmin, end=vmax, freq=freq)
 
+    # Initialize the output
     info = np.zeros(
         span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")]
@@ -595,45 +601,38 @@ def first_label(label_flags):
     # Case 1.
Less than a month if span <= periodspermonth: - day_start = period_break(dates_, "day") - month_start = period_break(dates_, "month") + day_start = _period_break(dates_, "day") + month_start = _period_break(dates_, "month") + year_start = _period_break(dates_, "year") - def _hour_finder(label_interval, force_year_start) -> None: - _hour = dates_.hour - _prev_hour = (dates_ - 1 * dates_.freq).hour - hour_start = (_hour - _prev_hour) != 0 + def _hour_finder(label_interval: int, force_year_start: bool) -> None: + target = dates_.hour + mask = _period_break_mask(dates_, "hour") info_maj[day_start] = True - info_min[hour_start & (_hour % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt[hour_start & (_hour % label_interval == 0)] = "%H:%M" + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" info_fmt[day_start] = "%H:%M\n%d-%b" info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" if force_year_start and not has_level_label(year_start, vmin_orig): info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y" - def _minute_finder(label_interval) -> None: - hour_start = period_break(dates_, "hour") - _minute = dates_.minute - _prev_minute = (dates_ - 1 * dates_.freq).minute - minute_start = (_minute - _prev_minute) != 0 + def _minute_finder(label_interval: int) -> None: + target = dates_.minute + hour_start = _period_break(dates_, "hour") + mask = _period_break_mask(dates_, "minute") info_maj[hour_start] = True - info_min[minute_start & (_minute % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] - info_fmt[minute_start & (_minute % label_interval == 0)] = "%H:%M" + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" info_fmt[day_start] = "%H:%M\n%d-%b" info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" - def _second_finder(label_interval) -> None: - minute_start = period_break(dates_, "minute") - _second = dates_.second - _prev_second = (dates_ - 1 * dates_.freq).second - second_start = (_second - _prev_second) != 0 - info["maj"][minute_start] = True - info["min"][second_start & (_second % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] - info_fmt[second_start & (_second % label_interval == 0)] = "%H:%M:%S" + def _second_finder(label_interval: int) -> None: + target = dates_.second + minute_start = _period_break(dates_, "minute") + mask = _period_break_mask(dates_, "second") + info_maj[minute_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M:%S" info_fmt[day_start] = "%H:%M:%S\n%d-%b" info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y" @@ -672,8 +671,6 @@ def _second_finder(label_interval) -> None: else: info_maj[month_start] = True info_min[day_start] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] info_fmt[day_start] = "%d" info_fmt[month_start] = "%d\n%b" info_fmt[year_start] = "%d\n%b\n%Y" @@ -685,15 +682,15 @@ def _second_finder(label_interval) -> None: # Case 2. 
Less than three months elif span <= periodsperyear // 4: - month_start = period_break(dates_, "month") + month_start = _period_break(dates_, "month") info_maj[month_start] = True if dtype_code < FreqGroup.FR_HR.value: info["min"] = True else: - day_start = period_break(dates_, "day") + day_start = _period_break(dates_, "day") info["min"][day_start] = True - week_start = period_break(dates_, "week") - year_start = period_break(dates_, "year") + week_start = _period_break(dates_, "week") + year_start = _period_break(dates_, "year") info_fmt[week_start] = "%d" info_fmt[month_start] = "\n\n%b" info_fmt[year_start] = "\n\n%b\n%Y" @@ -704,9 +701,9 @@ def _second_finder(label_interval) -> None: info_fmt[first_label(month_start)] = "\n\n%b\n%Y" # Case 3. Less than 14 months ............... elif span <= 1.15 * periodsperyear: - year_start = period_break(dates_, "year") - month_start = period_break(dates_, "month") - week_start = period_break(dates_, "week") + year_start = _period_break(dates_, "year") + month_start = _period_break(dates_, "month") + week_start = _period_break(dates_, "week") info_maj[month_start] = True info_min[week_start] = True info_min[year_start] = False @@ -717,17 +714,17 @@ def _second_finder(label_interval) -> None: info_fmt[first_label(month_start)] = "%b\n%Y" # Case 4. Less than 2.5 years ............... elif span <= 2.5 * periodsperyear: - year_start = period_break(dates_, "year") - quarter_start = period_break(dates_, "quarter") - month_start = period_break(dates_, "month") + year_start = _period_break(dates_, "year") + quarter_start = _period_break(dates_, "quarter") + month_start = _period_break(dates_, "month") info_maj[quarter_start] = True info_min[month_start] = True info_fmt[quarter_start] = "%b" info_fmt[year_start] = "%b\n%Y" # Case 4. Less than 4 years ................. elif span <= 4 * periodsperyear: - year_start = period_break(dates_, "year") - month_start = period_break(dates_, "month") + year_start = _period_break(dates_, "year") + month_start = _period_break(dates_, "month") info_maj[year_start] = True info_min[month_start] = True info_min[year_start] = False @@ -738,15 +735,15 @@ def _second_finder(label_interval) -> None: info_fmt[year_start] = "%b\n%Y" # Case 5. Less than 11 years ................ elif span <= 11 * periodsperyear: - year_start = period_break(dates_, "year") - quarter_start = period_break(dates_, "quarter") + year_start = _period_break(dates_, "year") + quarter_start = _period_break(dates_, "quarter") info_maj[year_start] = True info_min[quarter_start] = True info_min[year_start] = False info_fmt[year_start] = "%Y" # Case 6. More than 12 years ................ else: - year_start = period_break(dates_, "year") + year_start = _period_break(dates_, "year") year_break = dates_[year_start].year nyears = span / periodsperyear (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) @@ -759,8 +756,8 @@ def _second_finder(label_interval) -> None: return info -def _monthly_finder(vmin, vmax, freq): - periodsperyear = 12 +def _monthly_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray: + _, _, periodsperyear = _get_periods_per_ymd(freq) vmin_orig = vmin (vmin, vmax) = (int(vmin), int(vmax)) @@ -795,6 +792,7 @@ def _monthly_finder(vmin, vmax, freq): quarter_start = (dates_ % 3 == 0).nonzero() info_maj[year_start] = True # TODO: Check the following : is it really info['fmt'] ? 
+            # 2023-09-15 this is reached in test_finder_monthly
         info["fmt"][quarter_start] = True
         info["min"] = True
 
@@ -829,8 +827,8 @@ def _monthly_finder(vmin, vmax, freq):
     return info
 
 
-def _quarterly_finder(vmin, vmax, freq):
-    periodsperyear = 4
+def _quarterly_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray:
+    _, _, periodsperyear = _get_periods_per_ymd(freq)
     vmin_orig = vmin
     (vmin, vmax) = (int(vmin), int(vmax))
     span = vmax - vmin + 1
@@ -876,7 +874,8 @@ def _quarterly_finder(vmin, vmax, freq):
     return info
 
 
-def _annual_finder(vmin, vmax, freq):
+def _annual_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray:
+    # Note: small difference here vs other finders in adding 1 to vmax
     (vmin, vmax) = (int(vmin), int(vmax + 1))
     span = vmax - vmin + 1
 
@@ -889,8 +888,9 @@ def _annual_finder(vmin, vmax, freq):
 
     (min_anndef, maj_anndef) = _get_default_annual_spacing(span)
     major_idx = dates_ % maj_anndef == 0
+    minor_idx = dates_ % min_anndef == 0
     info["maj"][major_idx] = True
-    info["min"][(dates_ % min_anndef == 0)] = True
+    info["min"][minor_idx] = True
     info["fmt"][major_idx] = "%Y"
 
     return info
@@ -1087,7 +1087,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
     """
     Convert seconds to 'D days HH:MM:SS.F'
     """
-    s, ns = divmod(x, 10**9)
+    s, ns = divmod(x, 10**9)  # TODO(non-nano): this looks like it assumes ns
     m, s = divmod(s, 60)
     h, m = divmod(m, 60)
     d, h = divmod(h, 24)
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 0108079f1110f..7d574b86cef36 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -10,6 +10,8 @@
 
 import pandas._config.config as cf
 
+from pandas._libs.tslibs import to_offset
+
 from pandas import (
     Index,
     Period,
@@ -390,7 +392,7 @@ def test_quarterly_finder(year_span):
         pytest.skip("the quarterly finder is only invoked if the span is >= 45")
     nyears = span / 4
     (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears)
-    result = converter._quarterly_finder(vmin, vmax, "Q")
+    result = converter._quarterly_finder(vmin, vmax, to_offset("Q"))
     quarters = PeriodIndex(
         arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]")
     )

From abbb86e7cc69e53f5ee22fdf3bd5a78e21ad2351 Mon Sep 17 00:00:00 2001
From: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Date: Mon, 18 Sep 2023 18:37:34 +0200
Subject: [PATCH 74/93] docs: add explanation about `freq` units in
 `Timedelta.round()` (#55149)

---
 pandas/_libs/tslibs/timedeltas.pyx | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 2d9fe93c397cb..2178e972b4d08 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1942,6 +1942,7 @@ class Timedelta(_Timedelta):
         ----------
         freq : str
             Frequency string indicating the rounding resolution.
+            It uses the same units as the class constructor :class:`~pandas.Timedelta`.
 
         Returns
         -------
@@ -1969,6 +1970,7 @@ class Timedelta(_Timedelta):
         ----------
         freq : str
             Frequency string indicating the flooring resolution.
+            It uses the same units as the class constructor :class:`~pandas.Timedelta`.
 
         Examples
         --------
@@ -1988,6 +1990,7 @@ class Timedelta(_Timedelta):
         ----------
         freq : str
             Frequency string indicating the ceiling resolution.
+            It uses the same units as the class constructor :class:`~pandas.Timedelta`.
Examples -------- From e0c5b87850f924a2e7ab680979c628784d96181b Mon Sep 17 00:00:00 2001 From: Luke Manley Date: Mon, 18 Sep 2023 12:39:40 -0400 Subject: [PATCH 75/93] BUG: Index.difference not always returning a unique set of values (#55113) * BUG: Index.difference not always returning a unique set of values * whatsnew * udpate test * update test --- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/core/indexes/base.py | 4 ++-- pandas/tests/indexes/test_setops.py | 14 ++++++++++++-- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 9e55399afe5af..b229c9d22367e 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -262,7 +262,7 @@ Interval Indexing ^^^^^^^^ -- +- Bug in :meth:`Index.difference` not returning a unique set of values when ``other`` is empty or ``other`` is considered non-comparable (:issue:`55113`) - Missing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8756bb3f3c81b..3adb4dfa227db 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3598,14 +3598,14 @@ def difference(self, other, sort=None): if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 - result = self.rename(result_name) + result = self.unique().rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything - result = self.rename(result_name) + result = self.unique().rename(result_name) if sort is True: return result.sort_values() return result diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index a64994efec85a..d6304774b87c4 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -796,11 +796,21 @@ def test_difference_name_preservation(self, index, second_name, expected, sort): assert result.name == expected def test_difference_empty_arg(self, index, sort): - first = index[5:20] + first = index.copy() + first = first[5:20] first.name = "name" result = first.difference([], sort) + expected = index[5:20].unique() + expected.name = "name" + tm.assert_index_equal(result, expected) - tm.assert_index_equal(result, first) + def test_difference_should_not_compare(self): + # GH 55113 + left = Index([1, 1]) + right = Index([True]) + result = left.difference(right) + expected = Index([1]) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize("index", ["string"], indirect=True) def test_difference_identity(self, index, sort): From 271144ae64d44399e9e23d147aa83281925a21fc Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 18 Sep 2023 06:54:12 -1000 Subject: [PATCH 76/93] BUG: Interval with Timestamp with tz shows tz (#55035) * BUG: Interval with Timestamp with tz shows tz * Add whatsnew * Fix tests --- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/_libs/interval.pyx | 21 +++--------------- pandas/_libs/tslibs/timestamps.pyx | 12 ---------- pandas/core/indexes/interval.py | 10 +++++---- pandas/tests/frame/methods/test_to_csv.py | 2 +- pandas/tests/indexes/interval/test_formats.py | 22 ++++++++++++++++++- pandas/tests/io/excel/test_writers.py | 8 +++---- 7 files changed, 36 insertions(+), 41 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index b229c9d22367e..e32f357ad3777 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -257,7 
+257,7 @@ Strings Interval ^^^^^^^^ -- +- Bug in :class:`Interval` ``__repr__`` not displaying UTC offsets for :class:`Timestamp` bounds. Additionally the hour, minute and second components will now be shown. (:issue:`55015`) - Indexing diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index e07d80dd04b31..82f69c1dedd53 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -478,31 +478,16 @@ cdef class Interval(IntervalMixin): args = (self.left, self.right, self.closed) return (type(self), args) - def _repr_base(self): - left = self.left - right = self.right - - # TODO: need more general formatting methodology here - if isinstance(left, _Timestamp) and isinstance(right, _Timestamp): - left = left._short_repr - right = right._short_repr - - return left, right - def __repr__(self) -> str: - - left, right = self._repr_base() - disp = str if isinstance(left, np.generic) else repr + disp = str if isinstance(self.left, (np.generic, _Timestamp)) else repr name = type(self).__name__ - repr_str = f"{name}({disp(left)}, {disp(right)}, closed={repr(self.closed)})" + repr_str = f"{name}({disp(self.left)}, {disp(self.right)}, closed={repr(self.closed)})" # noqa: E501 return repr_str def __str__(self) -> str: - - left, right = self._repr_base() start_symbol = "[" if self.closed_left else "(" end_symbol = "]" if self.closed_right else ")" - return f"{start_symbol}{left}, {right}{end_symbol}" + return f"{start_symbol}{self.left}, {self.right}{end_symbol}" def __add__(self, y): if ( diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 944a2b0e97382..65d0d454ac817 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1078,18 +1078,6 @@ cdef class _Timestamp(ABCTimestamp): return result - @property - def _short_repr(self) -> str: - # format a Timestamp with only _date_repr if possible - # otherwise _repr_base - if (self.hour == 0 and - self.minute == 0 and - self.second == 0 and - self.microsecond == 0 and - self.nanosecond == 0): - return self._date_repr - return self._repr_base - # ----------------------------------------------------------------- # Conversion Methods diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b36672df32e61..eb8d25bcea592 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1039,8 +1039,9 @@ def interval_range( >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) - IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], - (2017-01-03, 2017-01-04]], + IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00], + (2017-01-02 00:00:00, 2017-01-03 00:00:00], + (2017-01-03 00:00:00, 2017-01-04 00:00:00]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. @@ -1056,8 +1057,9 @@ def interval_range( >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... 
periods=3, freq='MS') - IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], - (2017-03-01, 2017-04-01]], + IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00], + (2017-02-01 00:00:00, 2017-03-01 00:00:00], + (2017-03-01 00:00:00, 2017-04-01 00:00:00]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 294da02e259b7..9f45347c31165 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -1326,6 +1326,6 @@ def test_to_csv_categorical_and_interval(self): ) df["a"] = df["a"].astype("category") result = df.to_csv() - expected_rows = [",a", '0,"[2020-01-01, 2020-01-02]"'] + expected_rows = [",a", '0,"[2020-01-01 00:00:00, 2020-01-02 00:00:00]"'] expected = tm.convert_rows_list_to_csv_str(expected_rows) assert result == expected diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index 4d6f3a62d4dd0..f003211abd857 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ b/pandas/tests/indexes/interval/test_formats.py @@ -80,7 +80,11 @@ def test_repr_floats(self): ((Timestamp("20180102"), Timestamp("20180103"))), ], "both", - ["[2018-01-01, 2018-01-02]", "NaN", "[2018-01-02, 2018-01-03]"], + [ + "[2018-01-01 00:00:00, 2018-01-02 00:00:00]", + "NaN", + "[2018-01-02 00:00:00, 2018-01-03 00:00:00]", + ], ), ( [ @@ -103,3 +107,19 @@ def test_to_native_types(self, tuples, closed, expected_data): result = index._format_native_types() expected = np.array(expected_data) tm.assert_numpy_array_equal(result, expected) + + def test_timestamp_with_timezone(self): + # GH 55035 + index = IntervalIndex( + [ + Interval( + Timestamp("2020-01-01", tz="UTC"), Timestamp("2020-01-02", tz="UTC") + ) + ] + ) + result = repr(index) + expected = ( + "IntervalIndex([(2020-01-01 00:00:00+00:00, 2020-01-02 00:00:00+00:00]], " + "dtype='interval[datetime64[ns, UTC], right]')" + ) + assert result == expected diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 9cbb29605a1ec..3944599b167c1 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -307,10 +307,10 @@ def test_multiindex_interval_datetimes(self, ext): [ range(4), [ - "(2020-01-31, 2020-07-31]", - "(2020-07-31, 2021-01-31]", - "(2021-01-31, 2021-07-31]", - "(2021-07-31, 2022-01-31]", + "(2020-01-31 00:00:00, 2020-07-31 00:00:00]", + "(2020-07-31 00:00:00, 2021-01-31 00:00:00]", + "(2021-01-31 00:00:00, 2021-07-31 00:00:00]", + "(2021-07-31 00:00:00, 2022-01-31 00:00:00]", ], ] ), From 66a49457900a6b50b9d2bf2bcf6664ef7351475b Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Mon, 18 Sep 2023 18:56:11 +0200 Subject: [PATCH 77/93] BUG df.plot.box handles matplotlib Axes with sharey=True (#54940) * BUG manage sharey in plot.box with vert=False * fix * add entry in whats new * iter --- doc/source/whatsnew/v2.2.0.rst | 2 +- pandas/plotting/_matplotlib/boxplot.py | 45 ++++++++++++-------- pandas/tests/plotting/test_boxplot_method.py | 16 +++++++ 3 files changed, 44 insertions(+), 19 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index e32f357ad3777..2afd190755b4c 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -288,7 +288,7 @@ Period Plotting ^^^^^^^^ -- +- Bug in :meth:`DataFrame.plot.box` with ``vert=False`` and 
a matplotlib ``Axes`` created with ``sharey=True`` (:issue:`54941`) - Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 83cb8a6ab67dd..5fcea796a9c6e 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -40,6 +40,24 @@ from pandas._typing import MatplotlibColor +def _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None: + """Set the tick labels of a given axis. + + Due to https://github.com/matplotlib/matplotlib/pull/17266, we need to handle the + case of repeated ticks (due to `FixedLocator`) and thus we duplicate the number of + labels. + """ + ticks = ax.get_xticks() if is_vertical else ax.get_yticks() + if len(ticks) != len(labels): + i, remainder = divmod(len(ticks), len(labels)) + assert remainder == 0, remainder + labels *= i + if is_vertical: + ax.set_xticklabels(labels, **kwargs) + else: + ax.set_yticklabels(labels, **kwargs) + + class BoxPlot(LinePlot): @property def _kind(self) -> Literal["box"]: @@ -193,7 +211,9 @@ def _make_plot(self) -> None: ) self.maybe_color_bp(bp) self._return_obj[label] = ret - self._set_ticklabels(ax, ticklabels) + _set_ticklabels( + ax=ax, labels=ticklabels, is_vertical=self.orientation == "vertical" + ) else: y = self.data.values.T ax = self._get_ax(0) @@ -209,13 +229,9 @@ def _make_plot(self) -> None: labels = [pprint_thing(left) for left in labels] if not self.use_index: labels = [pprint_thing(key) for key in range(len(labels))] - self._set_ticklabels(ax, labels) - - def _set_ticklabels(self, ax: Axes, labels: list[str]) -> None: - if self.orientation == "vertical": - ax.set_xticklabels(labels) - else: - ax.set_yticklabels(labels) + _set_ticklabels( + ax=ax, labels=labels, is_vertical=self.orientation == "vertical" + ) def _make_legend(self) -> None: pass @@ -382,16 +398,9 @@ def plot_group(keys, values, ax: Axes, **kwds): ax.tick_params(axis="both", labelsize=fontsize) # GH 45465: x/y are flipped when "vert" changes - is_vertical = kwds.get("vert", True) - ticks = ax.get_xticks() if is_vertical else ax.get_yticks() - if len(ticks) != len(keys): - i, remainder = divmod(len(ticks), len(keys)) - assert remainder == 0, remainder - keys *= i - if is_vertical: - ax.set_xticklabels(keys, rotation=rot) - else: - ax.set_yticklabels(keys, rotation=rot) + _set_ticklabels( + ax=ax, labels=keys, is_vertical=kwds.get("vert", True), rotation=rot + ) maybe_color_bp(bp, **kwds) # Return axes in multiplot case, maybe revisit later # 985 diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 555b9fd0c82c2..76f7fa1f22eec 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -329,6 +329,22 @@ def test_plot_xlabel_ylabel(self, vert): assert ax.get_xlabel() == xlabel assert ax.get_ylabel() == ylabel + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_box(self, vert): + # GH 54941 + rng = np.random.default_rng(2) + df1 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + df2 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + + xlabel, ylabel = "x", "y" + _, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True) + df1.plot.box(ax=axs[0], vert=vert, xlabel=xlabel, ylabel=ylabel) + df2.plot.box(ax=axs[1], vert=vert, xlabel=xlabel, ylabel=ylabel) + for ax in axs: + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + mpl.pyplot.close() 
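A reproducer sketch for the regression this new test guards against (my own illustration, not part of the patch): with ``sharey=True`` matplotlib hands both subplots a single ``FixedLocator``, so before the fix the horizontal box plot raised when setting the tick labels; ``_set_ticklabels`` now tiles the labels to match the locator's tick count:

    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    df = pd.DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD"))

    # with vert=False the category labels land on the shared y-axis (GH 54941)
    _, axs = plt.subplots(ncols=2, sharey=True)
    df.plot.box(ax=axs[0], vert=False)
    df.plot.box(ax=axs[1], vert=False)
    plt.close("all")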
+ @pytest.mark.parametrize("vert", [True, False]) def test_boxplot_xlabel_ylabel(self, vert): df = DataFrame( From 95b6057ebd5d774bf5f3ab90514fe5fcee2cbe90 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 18 Sep 2023 10:20:38 -0700 Subject: [PATCH 78/93] DEPR: downcasting in replace (#54710) * DEPR: downcasting in replace * GH refs * fix docbuild i hope * suppress doc warnings * avoid warning in docs --- doc/source/user_guide/missing_data.rst | 6 +- doc/source/whatsnew/v2.2.0.rst | 3 +- pandas/core/internals/blocks.py | 68 ++++++++++++++++++--- pandas/tests/frame/methods/test_replace.py | 62 +++++++++++++------ pandas/tests/indexing/test_coercion.py | 30 +++++++-- pandas/tests/io/excel/test_writers.py | 4 +- pandas/tests/series/methods/test_replace.py | 38 ++++++++---- 7 files changed, 162 insertions(+), 49 deletions(-) diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index e0e752099b77a..4a2aa565dd15c 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -401,7 +401,7 @@ Limit the number of NA values filled df.ffill(limit=1) -NA values can be replaced with corresponding value from a :class:`Series`` or :class:`DataFrame`` +NA values can be replaced with corresponding value from a :class:`Series` or :class:`DataFrame` where the index and column aligns between the original object and the filled object. .. ipython:: python @@ -660,7 +660,7 @@ Pass a list of regular expressions that will replace matches with a scalar. .. ipython:: python - df.replace([r"\s*\.\s*", r"a|b"], np.nan, regex=True) + df.replace([r"\s*\.\s*", r"a|b"], "placeholder", regex=True) All of the regular expression examples can also be passed with the ``to_replace`` argument as the ``regex`` argument. In this case the ``value`` @@ -669,7 +669,7 @@ dictionary. .. ipython:: python - df.replace(regex=[r"\s*\.\s*", r"a|b"], value=np.nan) + df.replace(regex=[r"\s*\.\s*", r"a|b"], value="placeholder") .. note:: diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 2afd190755b4c..2a7b06ce0c4af 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -190,7 +190,8 @@ Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_parquet` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_pickle` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`) -- Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.downcasting", True)`` (:issue:`53656`) +- Deprecated automatic downcasting of object-dtype results in :meth:`Series.replace` and :meth:`DataFrame.replace`, explicitly call ``result = result.infer_objects(copy=False)`` instead. 
To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`54710`) +- Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) - Deprecated not passing a tuple to :class:`DataFrameGroupBy.get_group` or :class:`SeriesGroupBy.get_group` when grouping by a length-1 list-like (:issue:`25971`) - Deprecated strings ``S``, ``U``, and ``N`` denoting units in :func:`to_timedelta` (:issue:`52536`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6399f85723ae5..57833c1d626ee 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -761,7 +761,23 @@ def replace( if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN - blocks = blk.convert(copy=False, using_cow=using_cow) + if get_option("future.no_silent_downcasting") is True: + blocks = [blk] + else: + blocks = blk.convert(copy=False, using_cow=using_cow) + if len(blocks) > 1 or blocks[0].dtype != blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. To retain the old " + "behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) else: blocks = [blk] return blocks @@ -836,7 +852,21 @@ def _replace_regex( replace_regex(block.values, rx, value, mask) - return block.convert(copy=False, using_cow=using_cow) + nbs = block.convert(copy=False, using_cow=using_cow) + opt = get_option("future.no_silent_downcasting") + if (len(nbs) > 1 or nbs[0].dtype != block.dtype) and not opt: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. To retain the old " + "behavior, explicitly call `result.infer_objects(copy=False)`. 
" + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return nbs @final def replace_list( @@ -902,6 +932,7 @@ def replace_list( else: rb = [self if inplace else self.copy()] + opt = get_option("future.no_silent_downcasting") for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] @@ -939,14 +970,33 @@ def replace_list( b.refs.referenced_blocks.index(ref) ) - if convert and blk.is_object and not all(x is None for x in dest_list): + if ( + not opt + and convert + and blk.is_object + and not all(x is None for x in dest_list) + ): # GH#44498 avoid unwanted cast-back - result = extend_blocks( - [ - b.convert(copy=True and not using_cow, using_cow=using_cow) - for b in result - ] - ) + nbs = [] + for res_blk in result: + converted = res_blk.convert( + copy=True and not using_cow, using_cow=using_cow + ) + if len(converted) > 1 or converted[0].dtype != res_blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated " + "and will be removed in a future version. To " + "retain the old behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + nbs.extend(converted) + result = nbs new_rb.extend(result) rb = new_rb return rb diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 61e44b4e24c08..f07c53060a06b 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -289,7 +289,9 @@ def test_regex_replace_dict_nested_non_first_character(self, any_string_dtype): def test_regex_replace_dict_nested_gh4115(self): df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2}) - result = df.replace({"Type": {"Q": 0, "T": 1}}) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.replace({"Type": {"Q": 0, "T": 1}}) tm.assert_frame_equal(result, expected) def test_regex_replace_list_to_scalar(self, mix_abc): @@ -301,16 +303,20 @@ def test_regex_replace_list_to_scalar(self, mix_abc): "c": [np.nan, np.nan, np.nan, "d"], } ) - res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) res2 = df.copy() res3 = df.copy() - return_value = res2.replace( - [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True - ) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = res2.replace( + [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True + ) assert return_value is None - return_value = res3.replace( - regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True - ) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = res3.replace( + regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True + ) assert return_value is None tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) @@ -520,7 +526,9 @@ def test_replace_convert(self): # gh 3907 df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]]) m = {"foo": 1, "bar": 2, "bah": 3} - rep = df.replace(m) + msg = "Downcasting behavior in 
`replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + rep = df.replace(m) expec = Series([np.int64] * 3) res = rep.dtypes tm.assert_series_equal(expec, res) @@ -838,7 +846,12 @@ def test_replace_for_new_dtypes(self, datetime_frame): ], ) def test_replace_dtypes(self, frame, to_replace, value, expected): - result = frame.replace(to_replace, value) + warn = None + if isinstance(to_replace, datetime) and to_replace.year == 2920: + warn = FutureWarning + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(warn, match=msg): + result = frame.replace(to_replace, value) tm.assert_frame_equal(result, expected) def test_replace_input_formats_listlike(self): @@ -927,7 +940,9 @@ def test_replace_dict_no_regex(self): "Strongly Disagree": 1, } expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) - result = answer.replace(weights) + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + result = answer.replace(weights) tm.assert_series_equal(result, expected) def test_replace_series_no_regex(self): @@ -950,7 +965,9 @@ def test_replace_series_no_regex(self): } ) expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) - result = answer.replace(weights) + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + result = answer.replace(weights) tm.assert_series_equal(result, expected) def test_replace_dict_tuple_list_ordering_remains_the_same(self): @@ -1076,7 +1093,9 @@ def test_replace_period(self): expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) assert expected.dtypes.iloc[0] == "Period[M]" - result = df.replace(d) + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.replace(d) tm.assert_frame_equal(result, expected) def test_replace_datetime(self): @@ -1106,7 +1125,9 @@ def test_replace_datetime(self): ) assert set(df.fname.values) == set(d["fname"].keys()) expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) - result = df.replace(d) + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.replace(d) tm.assert_frame_equal(result, expected) def test_replace_datetimetz(self): @@ -1307,10 +1328,12 @@ def test_replace_commutative(self, df, to_replace, exp): np.float64(1), ], ) - def test_replace_replacer_dtype(self, request, replacer): + def test_replace_replacer_dtype(self, replacer): # GH26632 df = DataFrame(["a"]) - result = df.replace({"a": replacer, "b": replacer}) + msg = "Downcasting behavior in `replace` " + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.replace({"a": replacer, "b": replacer}) expected = DataFrame([replacer]) tm.assert_frame_equal(result, expected) @@ -1564,12 +1587,15 @@ def test_replace_regex_dtype_frame(self, regex): # GH-48644 df1 = DataFrame({"A": ["0"], "B": ["0"]}) expected_df1 = DataFrame({"A": [1], "B": [1]}) - result_df1 = df1.replace(to_replace="0", value=1, regex=regex) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result_df1 = df1.replace(to_replace="0", value=1, regex=regex) tm.assert_frame_equal(result_df1, expected_df1) df2 = DataFrame({"A": ["0"], "B": ["1"]}) expected_df2 = DataFrame({"A": [1], "B": ["1"]}) - result_df2 = df2.replace(to_replace="0", value=1, regex=regex) + with tm.assert_produces_warning(FutureWarning, match=msg): + result_df2 = df2.replace(to_replace="0", 
value=1, regex=regex) tm.assert_frame_equal(result_df2, expected_df2) def test_replace_with_value_also_being_replaced(self): diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 2c39729097487..82368c67dc6d4 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -836,8 +836,6 @@ def test_replace_series(self, how, to_key, from_key, replacer): # tested below return - result = obj.replace(replacer) - if (from_key == "float64" and to_key in ("int64")) or ( from_key == "complex128" and to_key in ("int64", "float64") ): @@ -851,6 +849,17 @@ def test_replace_series(self, how, to_key, from_key, replacer): exp = pd.Series(self.rep[to_key], index=index, name="yyy") assert exp.dtype == to_key + msg = "Downcasting behavior in `replace`" + warn = FutureWarning + if ( + exp.dtype == obj.dtype + or exp.dtype == object + or (exp.dtype.kind in "iufc" and obj.dtype.kind in "iufc") + ): + warn = None + with tm.assert_produces_warning(warn, match=msg): + result = obj.replace(replacer) + tm.assert_series_equal(result, exp) @pytest.mark.parametrize( @@ -866,11 +875,14 @@ def test_replace_series_datetime_tz(self, how, to_key, from_key, replacer): obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key - result = obj.replace(replacer) - exp = pd.Series(self.rep[to_key], index=index, name="yyy") assert exp.dtype == to_key + msg = "Downcasting behavior in `replace`" + warn = FutureWarning if exp.dtype != object else None + with tm.assert_produces_warning(warn, match=msg): + result = obj.replace(replacer) + tm.assert_series_equal(result, exp) @pytest.mark.parametrize( @@ -888,16 +900,22 @@ def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer) obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key - result = obj.replace(replacer) - exp = pd.Series(self.rep[to_key], index=index, name="yyy") + warn = FutureWarning if isinstance(obj.dtype, pd.DatetimeTZDtype) and isinstance( exp.dtype, pd.DatetimeTZDtype ): # with mismatched tzs, we retain the original dtype as of 2.0 exp = exp.astype(obj.dtype) + warn = None else: assert exp.dtype == to_key + if to_key == from_key: + warn = None + + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(warn, match=msg): + result = obj.replace(replacer) tm.assert_series_equal(result, exp) diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 3944599b167c1..bbd427387625b 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -1197,7 +1197,9 @@ def test_render_as_column_name(self, path): def test_true_and_false_value_options(self, path): # see gh-13347 df = DataFrame([["foo", "bar"]], columns=["col1", "col2"]) - expected = df.replace({"foo": True, "bar": False}) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.replace({"foo": True, "bar": False}) df.to_excel(path) read_frame = pd.read_excel( diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index ec9db8c3830d6..f08966c3816c0 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -76,7 +76,9 @@ def test_replace(self): ser[20:30] = "bar" # replace list with a single value - rs = ser.replace([np.nan, "foo", "bar"], -1) + msg = "Downcasting behavior in `replace`" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace([np.nan, "foo", "bar"], -1) assert (rs[:5] == -1).all() assert (rs[6:10] == -1).all() @@ -84,7 +86,8 @@ def test_replace(self): assert (pd.isna(ser[:5])).all() # replace with different values - rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3}) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3}) assert (rs[:5] == -1).all() assert (rs[6:10] == -2).all() @@ -92,11 +95,13 @@ def test_replace(self): assert (pd.isna(ser[:5])).all() # replace with different values with 2 lists - rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3]) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3]) tm.assert_series_equal(rs, rs2) # replace inplace - return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) assert return_value is None assert (ser[:5] == -1).all() @@ -294,7 +299,9 @@ def test_replace2(self): ser[20:30] = "bar" # replace list with a single value - rs = ser.replace([np.nan, "foo", "bar"], -1) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace([np.nan, "foo", "bar"], -1) assert (rs[:5] == -1).all() assert (rs[6:10] == -1).all() @@ -302,7 +309,8 @@ def test_replace2(self): assert (pd.isna(ser[:5])).all() # replace with different values - rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3}) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3}) assert (rs[:5] == -1).all() assert (rs[6:10] == -2).all() @@ -310,11 +318,13 @@ def test_replace2(self): assert (pd.isna(ser[:5])).all() # replace with different values with 2 lists - rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3]) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3]) tm.assert_series_equal(rs, rs2) # replace inplace - return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) assert return_value is None assert (ser[:5] == -1).all() assert (ser[6:10] == -1).all() @@ -373,7 +383,9 @@ def test_replace_unicode_with_number(self): def test_replace_mixed_types_with_string(self): # Testing mixed s = pd.Series([1, 2, 3, "4", 4, 5]) - result = s.replace([2, "4"], np.nan) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.replace([2, "4"], np.nan) expected = pd.Series([1, np.nan, 3, np.nan, 4, 5]) tm.assert_series_equal(expected, result) @@ -387,7 +399,9 @@ def test_replace_mixed_types_with_string(self): def test_replace_categorical(self, categorical, numeric): # GH 24971, GH#23305 ser = pd.Series(categorical) - result = ser.replace({"A": 1, "B": 2}) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.replace({"A": 1, "B": 2}) expected = pd.Series(numeric).astype("category") if 2 not in expected.cat.categories: # i.e. 
categories should be [1, 2] even if there are no "B"s present @@ -710,7 +724,9 @@ def test_replace_regex_dtype_series(self, regex): # GH-48644 series = pd.Series(["0"]) expected = pd.Series([1]) - result = series.replace(to_replace="0", value=1, regex=regex) + msg = "Downcasting behavior in `replace`" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = series.replace(to_replace="0", value=1, regex=regex) tm.assert_series_equal(result, expected) def test_replace_different_int_types(self, any_int_numpy_dtype): From 1496630d35337425d860128cad2e8bc624b9b25d Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 18 Sep 2023 10:22:09 -0700 Subject: [PATCH 79/93] DEPR: fillna downcasting from object dtype (#54261) * DEPR: fillna downcasting from object dtype * GH ref * suppress warning * update test * Update doc/source/whatsnew/v2.1.0.rst Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --------- Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- doc/source/whatsnew/v2.2.0.rst | 1 + pandas/core/generic.py | 9 ++++++- pandas/core/internals/blocks.py | 28 +++++++++++++++++++-- pandas/io/formats/xml.py | 9 ++++++- pandas/io/json/_json.py | 11 +++++++- pandas/io/stata.py | 9 ++++++- pandas/plotting/_matplotlib/core.py | 8 +++++- pandas/tests/extension/test_masked.py | 11 +++++++- pandas/tests/frame/indexing/test_where.py | 2 ++ pandas/tests/frame/methods/test_fillna.py | 7 ++++-- pandas/tests/frame/test_arithmetic.py | 12 ++++++--- pandas/tests/frame/test_logical_ops.py | 1 + pandas/tests/frame/test_reductions.py | 1 + pandas/tests/frame/test_stack_unstack.py | 2 ++ pandas/tests/groupby/test_function.py | 1 + pandas/tests/series/methods/test_reindex.py | 8 ++++-- pandas/tests/series/test_api.py | 1 + pandas/tests/series/test_arithmetic.py | 10 +++++--- pandas/tests/series/test_logical_ops.py | 1 + 19 files changed, 113 insertions(+), 19 deletions(-) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 2a7b06ce0c4af..55a3419e95703 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -198,6 +198,7 @@ Deprecations - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`52536`) - Deprecated the extension test classes ``BaseNoReduceTests``, ``BaseBooleanReduceTests``, and ``BaseNumericReduceTests``, use ``BaseReduceTests`` instead (:issue:`54663`) +- Deprecating downcasting the results of :meth:`DataFrame.fillna`, :meth:`Series.fillna`, :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, :meth:`Series.bfill` in object-dtype cases. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`54261`) .. --------------------------------------------------------------------------- .. 
_whatsnew_220.performance: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bbaa6d3faeb05..271ad40a98272 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10393,7 +10393,14 @@ def _where( # make sure we are boolean fill_value = bool(inplace) - cond = cond.fillna(fill_value) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + cond = cond.fillna(fill_value) + cond = cond.infer_objects(copy=False) msg = "Boolean array expected for the condition, not {dtype}" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 57833c1d626ee..66b01dfb59f7f 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -498,7 +498,11 @@ def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block: @final def _maybe_downcast( - self, blocks: list[Block], downcast, using_cow: bool, caller: str + self, + blocks: list[Block], + downcast, + using_cow: bool, + caller: str, ) -> list[Block]: if downcast is False: return blocks @@ -510,9 +514,29 @@ def _maybe_downcast( # but ATM it breaks too much existing code. # split and convert the blocks + if caller == "fillna" and get_option("future.no_silent_downcasting"): + return blocks + nbs = extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) + if caller == "fillna": + if len(nbs) != len(blocks) or not all( + x.dtype == y.dtype for x, y in zip(nbs, blocks) + ): + # GH#54261 + warnings.warn( + "Downcasting object dtype arrays on .fillna, .ffill, .bfill " + "is deprecated and will change in a future version. " + "Call result.infer_objects(copy=False) instead. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return nbs elif downcast is None: return blocks @@ -1549,7 +1573,7 @@ def pad_or_backfill( data = extract_array(new_values, extract_numpy=True) nb = self.make_block_same_class(data, refs=refs) - return nb._maybe_downcast([nb], downcast, using_cow, caller="pad_or_backfill") + return nb._maybe_downcast([nb], downcast, using_cow, caller="fillna") @final def interpolate( diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index 76b938755755a..a6ee8407988ec 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -9,6 +9,7 @@ TYPE_CHECKING, Any, ) +import warnings from pandas.errors import AbstractMethodError from pandas.util._decorators import doc @@ -202,7 +203,13 @@ def process_dataframe(self) -> dict[int | str, dict[str, Any]]: df = df.reset_index() if self.na_rep is not None: - df = df.fillna(self.na_rep) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + df = df.fillna(self.na_rep) return df.to_dict(orient="index") diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 52ea072d1483f..ecab14a54beff 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -1217,7 +1217,16 @@ def _try_convert_data( if not self.dtype: if all(notna(data)): return data, False - return data.fillna(np.nan), True + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + filled = data.fillna(np.nan) + + return filled, True elif self.dtype is True: pass diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 
a6d17a604a23f..d630a5ff8a41c 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -2983,7 +2983,14 @@ def _prepare_data(self) -> np.rec.recarray: for i, col in enumerate(data): typ = typlist[i] if typ <= self._max_string_length: - data[col] = data[col].fillna("").apply(_pad_bytes, args=(typ,)) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + dc = data[col].fillna("") + data[col] = dc.apply(_pad_bytes, args=(typ,)) stype = f"S{typ}" dtypes[col] = stype data[col] = data[col].astype(stype) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index c62f73271577d..d88605db60720 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1538,7 +1538,13 @@ def _kind(self) -> Literal["area"]: def __init__(self, data, **kwargs) -> None: kwargs.setdefault("stacked", True) - data = data.fillna(value=0) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + data = data.fillna(value=0) LinePlot.__init__(self, data, **kwargs) if not self.stacked: diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py index f5b0b6f4efa98..d27e9b8b9e983 100644 --- a/pandas/tests/extension/test_masked.py +++ b/pandas/tests/extension/test_masked.py @@ -13,6 +13,8 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ +import warnings + import numpy as np import pytest @@ -186,7 +188,14 @@ def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): if sdtype.kind in "iu": if op_name in ("__rtruediv__", "__truediv__", "__div__"): - expected = expected.fillna(np.nan).astype("Float64") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + filled = expected.fillna(np.nan) + expected = filled.astype("Float64") else: # combine method result in 'biggest' (int64) dtype expected = expected.astype(sdtype) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 1eb67671da0b8..4576a86ad27cd 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -96,6 +96,7 @@ def test_where_upcasting(self): tm.assert_series_equal(result, expected) + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_where_alignment(self, where_frame, float_string_frame): # aligning def _check_align(df, cond, other, check_dtypes=True): @@ -170,6 +171,7 @@ def test_where_invalid(self): with pytest.raises(ValueError, match=msg): df.mask(0) + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_where_set(self, where_frame, float_string_frame, mixed_int_frame): # where inplace diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index 812150bb860e9..52b4b64ee279f 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -360,7 +360,9 @@ def test_fillna_dtype_conversion(self): expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5]) tm.assert_series_equal(result, expected) - result = df.fillna(1) + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.fillna(1) expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 
5]) tm.assert_frame_equal(result, expected) @@ -817,7 +819,8 @@ def test_fillna_nones_inplace(): [[None, None], [None, None]], columns=["A", "B"], ) - with tm.assert_produces_warning(False): + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): df.fillna(value={"A": 1, "B": 2}, inplace=True) expected = DataFrame([[1, 2], [1, 2]], columns=["A", "B"]) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 878e94c15e16b..1488fa65fabc0 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1254,7 +1254,9 @@ def test_operators_none_as_na(self, op): # since filling converts dtypes from object, changed expected to be # object - filled = df.fillna(np.nan) + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + filled = df.fillna(np.nan) result = op(df, 3) expected = op(filled, 3).astype(object) expected[pd.isna(expected)] = np.nan @@ -1265,10 +1267,14 @@ def test_operators_none_as_na(self, op): expected[pd.isna(expected)] = np.nan tm.assert_frame_equal(result, expected) - result = op(df, df.fillna(7)) + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = op(df, df.fillna(7)) tm.assert_frame_equal(result, expected) - result = op(df.fillna(7), df) + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = op(df.fillna(7), df) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)]) diff --git a/pandas/tests/frame/test_logical_ops.py b/pandas/tests/frame/test_logical_ops.py index 2cc3b67e7ac02..a15d7d7f93f01 100644 --- a/pandas/tests/frame/test_logical_ops.py +++ b/pandas/tests/frame/test_logical_ops.py @@ -151,6 +151,7 @@ def _check_unary_op(op): _check_unary_op(operator.inv) # TODO: belongs elsewhere + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_logical_with_nas(self): d = DataFrame({"a": [np.nan, False], "b": [True, True]}) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index e7b6a0c0b39b0..74473bc54d51e 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1155,6 +1155,7 @@ def test_any_all_mixed_float(self, opname, axis, bool_only, float_string_frame): def test_any_all_bool_with_na(self, opname, axis, bool_frame_with_na): getattr(bool_frame_with_na, opname)(axis=axis, bool_only=False) + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize("opname", ["any", "all"]) def test_any_all_bool_frame(self, opname, bool_frame_with_na): # GH#12863: numpy gives back non-boolean data for object type diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index b54a795af4fdc..9b76ae093e8c4 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1184,6 +1184,7 @@ def test_stack_preserve_categorical_dtype_values(self, future_stack): ) tm.assert_series_equal(result, expected) + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize( "index, columns", [ @@ -1194,6 +1195,7 @@ def test_stack_preserve_categorical_dtype_values(self, future_stack): ) def test_stack_multi_columns_non_unique_index(self, index, columns, future_stack): # 
GH-28301 + df = DataFrame(index=index, columns=columns).fillna(1) stacked = df.stack(future_stack=future_stack) new_index = MultiIndex.from_tuples(stacked.index.to_numpy()) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 287310a18c7df..41bbfcf6840a9 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1534,6 +1534,7 @@ def test_numeric_only(kernel, has_arg, numeric_only, keys): method(*args, **kwargs) +@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize("dtype", [bool, int, float, object]) def test_deprecate_numeric_only_series(dtype, groupby_func, request): # GH#46560 diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py index 016208f2d2026..f3075c116883a 100644 --- a/pandas/tests/series/methods/test_reindex.py +++ b/pandas/tests/series/methods/test_reindex.py @@ -152,7 +152,9 @@ def test_reindex_inference(): # inference of new dtype s = Series([True, False, False, True], index=list("abcd")) new_index = "agc" - result = s.reindex(list(new_index)).ffill() + msg = "Downcasting object dtype arrays on" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.reindex(list(new_index)).ffill() expected = Series([True, True, False], index=list(new_index)) tm.assert_series_equal(result, expected) @@ -160,7 +162,9 @@ def test_reindex_inference(): def test_reindex_downcasting(): # GH4618 shifted series downcasting s = Series(False, index=range(5)) - result = s.shift(1).bfill() + msg = "Downcasting object dtype arrays on" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.shift(1).bfill() expected = Series(False, index=range(5)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index be63d9500ce73..a39b3ff7e6f2b 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -203,6 +203,7 @@ def test_series_datetimelike_attribute_access_invalid(self): with pytest.raises(AttributeError, match=msg): ser.weekday + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize( "kernel, has_numeric_only", [ diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 44121cb5f784f..55fc77fb5705f 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -639,10 +639,12 @@ def test_comparison_operators_with_nas(self, comparison_op): result = comparison_op(ser, val) expected = comparison_op(ser.dropna(), val).reindex(ser.index) - if comparison_op is operator.ne: - expected = expected.fillna(True).astype(bool) - else: - expected = expected.fillna(False).astype(bool) + msg = "Downcasting object dtype arrays" + with tm.assert_produces_warning(FutureWarning, match=msg): + if comparison_op is operator.ne: + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index 26046ef9ba295..2146e154dc7fa 100644 --- a/pandas/tests/series/test_logical_ops.py +++ b/pandas/tests/series/test_logical_ops.py @@ -15,6 +15,7 @@ class TestSeriesLogicalOps: + @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize("bool_op", [operator.and_, 
operator.or_, operator.xor]) def test_bool_operators_with_nas(self, bool_op): # boolean &, |, ^ should work with object arrays and propagate NAs From 6dc4a6598172d75f7dfd383af75b1c9efb0f1c11 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 18 Sep 2023 09:00:44 -1000 Subject: [PATCH 80/93] STY: Fix precommit failure (#55186) --- pandas/_libs/tslibs/timedeltas.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 2178e972b4d08..2f6fa35cae070 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1942,7 +1942,7 @@ class Timedelta(_Timedelta): ---------- freq : str Frequency string indicating the rounding resolution. - It uses the same units as class contructor :class:`~pandas.Timedelta`. + It uses the same units as class constructor :class:`~pandas.Timedelta`. Returns ------- @@ -1970,7 +1970,7 @@ class Timedelta(_Timedelta): ---------- freq : str Frequency string indicating the flooring resolution. - It uses the same units as class contructor :class:`~pandas.Timedelta`. + It uses the same units as class constructor :class:`~pandas.Timedelta`. Examples -------- @@ -1990,7 +1990,7 @@ class Timedelta(_Timedelta): ---------- freq : str Frequency string indicating the ceiling resolution. - It uses the same units as class contructor :class:`~pandas.Timedelta`. + It uses the same units as class constructor :class:`~pandas.Timedelta`. Examples -------- From 025e6ae2e9de9036c0b64f9f6571f00888b9f403 Mon Sep 17 00:00:00 2001 From: Rajat Subhra Mukherjee Date: Tue, 19 Sep 2023 01:12:38 +0530 Subject: [PATCH 81/93] DOC: added deprecation message in docs for DataFrame.rolling (#55177) * added deprecation message in docs * requested changes --- pandas/core/window/rolling.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index becbba703f92c..72e94d049a9de 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -937,6 +937,11 @@ class Window(BaseWindow): For `Series` this parameter is unused and defaults to 0. + .. deprecated:: 2.1.0 + + The axis keyword is deprecated. For ``axis=1``, + transpose the DataFrame first instead. + closed : str, default None If ``'right'``, the first point in the window is excluded from calculations. From 68268458ae3091875841b750c9763e64de835e2f Mon Sep 17 00:00:00 2001 From: Natalia Mokeeva <91160475+natmokval@users.noreply.github.com> Date: Mon, 18 Sep 2023 22:19:01 +0200 Subject: [PATCH 82/93] DOC: correct an example in whatsnew v0.14.0.rst (#55182) --- doc/source/whatsnew/v0.14.0.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst index 9c537b3a48c74..33c3de577c063 100644 --- a/doc/source/whatsnew/v0.14.0.rst +++ b/doc/source/whatsnew/v0.14.0.rst @@ -349,9 +349,15 @@ More consistent behavior for some groupby methods: - groupby head and tail respect column selection: - .. ipython:: python + .. code-block:: ipython + + In [19]: g[['B']].head(1) + Out[19]: + B + 0 2 + 2 6 - g[['B']].head(1) + [2 rows x 1 columns] - groupby ``nth`` now reduces by default; filtering can be achieved by passing ``as_index=False``. With an optional ``dropna`` argument to ignore NaN. See :ref:`the docs `. 
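The fillna/ffill suppressions running through the hunks above all target a single pandas 2.1 deprecation: object-dtype results that were silently downcast to a tighter dtype. A minimal sketch of what triggers the warning and of the exact filter those patches install (assuming pandas 2.1.x; the series values are invented for illustration):

    import warnings

    import pandas as pd

    ser = pd.Series([True, False, None], dtype=object)

    # On pandas 2.1.x, filling the missing value leaves only booleans,
    # so fillna downcasts the object result to a tighter dtype and emits
    # FutureWarning("Downcasting object dtype arrays ...").
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            "Downcasting object dtype arrays",
            category=FutureWarning,
        )
        filled = ser.fillna(False)

In test code the same filter is spelled declaratively, as the patches above do, with @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning").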
From 6b3e66b53d10e19e83b51f14a66a335d5ff3394b Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Mon, 18 Sep 2023 16:37:33 -0500 Subject: [PATCH 83/93] ENH: add `ExtensionArray._explode` method; adjust pyarrow extension for use of new interface (#54834) * add ExtensionArray._explode method; adjust pyarrow extension for use * black * add to whatsnew 2.1.0 * pre-commit fix * add _explode to docs * Update pandas/core/arrays/arrow/array.py Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> * switch whatsnew files * adjust docstring * fix docstring --------- Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- doc/source/reference/extensions.rst | 1 + doc/source/whatsnew/v2.2.0.rst | 1 + pandas/core/arrays/arrow/array.py | 4 +++ pandas/core/arrays/base.py | 36 +++++++++++++++++++++ pandas/core/series.py | 7 ++-- pandas/tests/series/methods/test_explode.py | 10 ++++++ 6 files changed, 54 insertions(+), 5 deletions(-) diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index e177e2b1d87d5..83f830bb11198 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -34,6 +34,7 @@ objects. api.extensions.ExtensionArray._accumulate api.extensions.ExtensionArray._concat_same_type + api.extensions.ExtensionArray._explode api.extensions.ExtensionArray._formatter api.extensions.ExtensionArray._from_factorized api.extensions.ExtensionArray._from_sequence diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 55a3419e95703..0fc4afc95a2ce 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -73,6 +73,7 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ +- :meth:`ExtensionArray._explode` interface method added to allow extension type implementations of the ``explode`` method (:issue:`54833`) - DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`) - diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index a329c37c77449..e67b7035822cc 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1609,6 +1609,10 @@ def _explode(self): """ See Series.explode.__doc__. """ + # child class explode method supports only list types; return + # default implementation for non list types. + if not pa.types.is_list(self.dtype.pyarrow_dtype): + return super()._explode() values = self counts = pa.compute.list_value_length(values._pa_array) counts = counts.fill_null(1).to_numpy() diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index f3bb7323c7d5f..933944dbd4632 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -142,6 +142,7 @@ class ExtensionArray: view _accumulate _concat_same_type + _explode _formatter _from_factorized _from_sequence @@ -1924,6 +1925,41 @@ def _hash_pandas_object( values, encoding=encoding, hash_key=hash_key, categorize=categorize ) + def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]: + """ + Transform each element of list-like to a row. + + For arrays that do not contain list-like elements the default + implementation of this method just returns a copy and an array + of ones (unchanged index). + + Returns + ------- + ExtensionArray + Array with the exploded values. + np.ndarray[uint64] + The original lengths of each list-like for determining the + resulting index. 
+ + See Also + -------- + Series.explode : The method on the ``Series`` object that this + extension array method is meant to support. + + Examples + -------- + >>> import pyarrow as pa + >>> a = pd.array([[1, 2, 3], [4], [5, 6]], + ... dtype=pd.ArrowDtype(pa.list_(pa.int64()))) + >>> a._explode() + ( + [1, 2, 3, 4, 5, 6] + Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32)) + """ + values = self.copy() + counts = np.ones(shape=(len(self),), dtype=np.uint64) + return values, counts + def tolist(self) -> list: """ Return a list of the values. diff --git a/pandas/core/series.py b/pandas/core/series.py index e0e27581ef7e2..78ec1554198df 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -76,10 +76,7 @@ pandas_dtype, validate_all_hashable, ) -from pandas.core.dtypes.dtypes import ( - ArrowDtype, - ExtensionDtype, -) +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import ( @@ -4390,7 +4387,7 @@ def explode(self, ignore_index: bool = False) -> Series: 3 4 dtype: object """ - if isinstance(self.dtype, ArrowDtype) and self.dtype.type == list: + if isinstance(self.dtype, ExtensionDtype): values, counts = self._values._explode() elif len(self) and is_object_dtype(self.dtype): values, counts = reshape.explode(np.asarray(self._values)) diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py index c8a9eb6f89fde..5a0188585ef30 100644 --- a/pandas/tests/series/methods/test_explode.py +++ b/pandas/tests/series/methods/test_explode.py @@ -163,3 +163,13 @@ def test_explode_pyarrow_list_type(ignore_index): dtype=pd.ArrowDtype(pa.int64()), ) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ignore_index", [True, False]) +def test_explode_pyarrow_non_list_type(ignore_index): + pa = pytest.importorskip("pyarrow") + data = [1, 2, 3] + ser = pd.Series(data, dtype=pd.ArrowDtype(pa.int64())) + result = ser.explode(ignore_index=ignore_index) + expected = pd.Series([1, 2, 3], dtype="int64[pyarrow]", index=[0, 1, 2]) + tm.assert_series_equal(result, expected) From ebca6dfb8f159ddc00081a463a6d9eda0fc96d8c Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:02:54 -1000 Subject: [PATCH 84/93] CI: Fix no BLAS error in 32 bit build (#55185) --- .github/workflows/unit-tests.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 98c6226b14075..139cfcde95b2c 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -230,11 +230,13 @@ jobs: git -c user.email="you@example.com" merge --no-commit my_ref_name fi - name: Build environment and Run Tests + # https://github.com/numpy/numpy/issues/24703#issuecomment-1722379388 run: | /opt/python/cp311-cp311/bin/python -m venv ~/virtualenvs/pandas-dev . 
~/virtualenvs/pandas-dev/bin/activate python -m pip install --no-cache-dir -U pip wheel setuptools meson[ninja]==1.2.1 meson-python==0.13.1 - python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install numpy --config-settings=setup-args="-Dallow-noblas=true" + python -m pip install --no-cache-dir versioneer[toml] cython python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir export PANDAS_CI=1 From df7c0b772c775a65ae5fa2dae3ac192060ea2fae Mon Sep 17 00:00:00 2001 From: William Ayd Date: Mon, 18 Sep 2023 19:47:10 -0400 Subject: [PATCH 85/93] Replace classes with pytest in test_sql (#55074) * initial working test * passing mixing class removal * converted non-sqlalchemy tests * large refactor * sqlite class conversion * checkpoint * sqlitefallback conversion * fixup tests * no more test classes * factory func * most code cleanups * removed breakpoint; passing tests * fixes * fix when missing SQLAlchemy * more fixups when no SQLAlchemy * fixups * xfail -> skip * sqlite fixture use transaction for cleanup * verbose test for hangs * try skipping sqlite-sqlalchemy-memory on rollback test * sqlite sqlaclchemy memory cleanup * revert verbose logging in tests * mark all db tests * try single_cpu * skip more engine tests that can hang * try no pandasSQL without transaction * more skip * try verbose * transaction skips * remove verbose CI * CI verbose * no more hanging * reverted CI files * type ignore * cleanup skips * remove marks * mark fixtures * mark postgres fixtures --- pandas/tests/io/test_sql.py | 3772 +++++++++++++++++------------------ 1 file changed, 1880 insertions(+), 1892 deletions(-) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 1abe0ad55a864..f015c9efe7122 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -397,6 +397,54 @@ def test_frame3(): return DataFrame(data, columns=columns) +def get_all_views(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='view'") + return [view[0] for view in c.fetchall()] + else: + from sqlalchemy import inspect + + return inspect(conn).get_view_names() + + +def get_all_tables(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='table'") + return [table[0] for table in c.fetchall()] + else: + from sqlalchemy import inspect + + return inspect(conn).get_table_names() + + +def drop_table( + table_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}") + conn.commit() + else: + with conn.begin() as con: + sql.SQLDatabase(con).drop_table(table_name) + + +def drop_view( + view_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP VIEW IF EXISTS {sql._get_valid_sqlite_name(view_name)}") + conn.commit() + else: + quoted_view = conn.engine.dialect.identifier_preparer.quote_identifier( + view_name + ) + stmt = sqlalchemy.text(f"DROP VIEW IF EXISTS {quoted_view}") + with conn.begin() as con: + con.execute(stmt) # type: ignore[union-attr] + + 
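One structural point is worth spelling out before the fixtures that use these helpers: cleanup that previously lived in the removed MixIn classes' teardown_method now runs in each fixture's teardown phase, i.e. after the yield, so it executes whether the test passed or failed. A condensed sketch of the shape (make_engine is a hypothetical stand-in for the real sqlalchemy.create_engine calls; get_all_views and friends are the helpers just added above):

    import pytest

    @pytest.fixture
    def some_engine():
        engine = make_engine()  # hypothetical; see the real fixtures below
        yield engine
        # Teardown: runs regardless of test outcome, replacing the old
        # MixInBase.teardown_method.
        for view in get_all_views(engine):
            drop_view(view, engine)
        for tbl in get_all_tables(engine):
            drop_table(tbl, engine)
        engine.dispose()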
@pytest.fixture def mysql_pymysql_engine(iris_path, types_data): sqlalchemy = pytest.importorskip("sqlalchemy") @@ -416,10 +464,10 @@ def mysql_pymysql_engine(iris_path, types_data): if not insp.has_table("iris_view"): create_and_load_iris_view(engine) yield engine - with engine.connect() as conn: - with conn.begin(): - stmt = sqlalchemy.text("DROP TABLE IF EXISTS test_frame;") - conn.execute(stmt) + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) engine.dispose() @@ -445,10 +493,10 @@ def postgresql_psycopg2_engine(iris_path, types_data): if not insp.has_table("iris_view"): create_and_load_iris_view(engine) yield engine - with engine.connect() as conn: - with conn.begin(): - stmt = sqlalchemy.text("DROP TABLE IF EXISTS test_frame;") - conn.execute(stmt) + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) engine.dispose() @@ -481,6 +529,10 @@ def sqlite_engine(sqlite_str, iris_path, types_data): create_and_load_types(engine, types_data, "sqlite") yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) engine.dispose() @@ -528,32 +580,60 @@ def sqlite_buildin(): @pytest.fixture -def sqlite_buildin_iris(sqlite_buildin, iris_path): +def sqlite_sqlalchemy_memory_engine(iris_path, types_data): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine("sqlite:///:memory:") + + insp = sqlalchemy.inspect(engine) + if not insp.has_table("iris"): + create_and_load_iris(engine, iris_path, "sqlite") + if not insp.has_table("iris_view"): + create_and_load_iris_view(engine) + if not insp.has_table("types"): + for entry in types_data: + entry.pop("DateColWithTz") + create_and_load_types(engine, types_data, "sqlite") + + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + + +@pytest.fixture +def sqlite_buildin_iris(sqlite_buildin, iris_path, types_data): create_and_load_iris_sqlite3(sqlite_buildin, iris_path) + + for entry in types_data: + entry.pop("DateColWithTz") + types_data = [tuple(entry.values()) for entry in types_data] + + create_and_load_types_sqlite3(sqlite_buildin, types_data) return sqlite_buildin mysql_connectable = [ - "mysql_pymysql_engine", - "mysql_pymysql_conn", + pytest.param("mysql_pymysql_engine", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn", marks=pytest.mark.db), ] postgresql_connectable = [ - "postgresql_psycopg2_engine", - "postgresql_psycopg2_conn", + pytest.param("postgresql_psycopg2_engine", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn", marks=pytest.mark.db), ] sqlite_connectable = [ - "sqlite_engine", - "sqlite_conn", - "sqlite_str", + pytest.param("sqlite_engine", marks=pytest.mark.db), + pytest.param("sqlite_conn", marks=pytest.mark.db), + pytest.param("sqlite_str", marks=pytest.mark.db), ] sqlite_iris_connectable = [ - "sqlite_iris_engine", - "sqlite_iris_conn", - "sqlite_iris_str", + pytest.param("sqlite_iris_engine", marks=pytest.mark.db), + pytest.param("sqlite_iris_conn", marks=pytest.mark.db), + pytest.param("sqlite_iris_str", marks=pytest.mark.db), ] sqlalchemy_connectable = mysql_connectable + postgresql_connectable + sqlite_connectable @@ -562,12 +642,17 @@ def sqlite_buildin_iris(sqlite_buildin, iris_path): mysql_connectable + postgresql_connectable + sqlite_iris_connectable ) 
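The pytest.param entries above attach the db mark to individual database-backed parameters instead of marking a whole test class, so deselecting them (e.g. pytest -m "not db") drops only the server-backed cases while the sqlite variants still run. A minimal illustration of the mechanism (test name and second parameter invented for this sketch):

    import pytest

    @pytest.mark.parametrize(
        "conn_fixture",
        [
            pytest.param("mysql_pymysql_engine", marks=pytest.mark.db),
            "sqlite_buildin",  # a plain string gets no extra mark
        ],
    )
    def test_example_roundtrip(conn_fixture, request):
        # resolve the fixture by name, as the tests in this patch do
        conn = request.getfixturevalue(conn_fixture)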
-all_connectable = sqlalchemy_connectable + ["sqlite_buildin"] +all_connectable = sqlalchemy_connectable + [ + "sqlite_buildin", + "sqlite_sqlalchemy_memory_engine", +] -all_connectable_iris = sqlalchemy_connectable_iris + ["sqlite_buildin_iris"] +all_connectable_iris = sqlalchemy_connectable_iris + [ + "sqlite_buildin_iris", + "sqlite_sqlalchemy_memory_engine", +] -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_dataframe_to_sql(conn, test_frame1, request): # GH 51086 if conn is sqlite_engine @@ -575,7 +660,14 @@ def test_dataframe_to_sql(conn, test_frame1, request): test_frame1.to_sql(name="test", con=conn, if_exists="append", index=False) -@pytest.mark.db +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_empty(conn, test_frame1, request): + # GH 51086 if conn is sqlite_engine + conn = request.getfixturevalue(conn) + empty_df = test_frame1.iloc[:0] + empty_df.to_sql(name="test", con=conn, if_exists="append", index=False) + + @pytest.mark.parametrize("conn", all_connectable) def test_dataframe_to_sql_arrow_dtypes(conn, request): # GH 52046 @@ -596,7 +688,6 @@ def test_dataframe_to_sql_arrow_dtypes(conn, request): df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): # GH 52046 @@ -612,7 +703,6 @@ def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize("method", [None, "multi"]) def test_to_sql(conn, method, test_frame1, request): @@ -623,7 +713,6 @@ def test_to_sql(conn, method, test_frame1, request): assert count_rows(conn, "test_frame") == len(test_frame1) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize("mode, num_row_coef", [("replace", 1), ("append", 2)]) def test_to_sql_exist(conn, mode, num_row_coef, test_frame1, request): @@ -635,7 +724,6 @@ def test_to_sql_exist(conn, mode, num_row_coef, test_frame1, request): assert count_rows(conn, "test_frame") == num_row_coef * len(test_frame1) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_to_sql_exist_fail(conn, test_frame1, request): conn = request.getfixturevalue(conn) @@ -648,7 +736,6 @@ def test_to_sql_exist_fail(conn, test_frame1, request): pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_read_iris_query(conn, request): conn = request.getfixturevalue(conn) @@ -661,7 +748,6 @@ def test_read_iris_query(conn, request): assert "SepalWidth" in iris_frame.columns -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_read_iris_query_chunksize(conn, request): conn = request.getfixturevalue(conn) @@ -674,7 +760,6 @@ def test_read_iris_query_chunksize(conn, request): assert "SepalWidth" in iris_frame.columns -@pytest.mark.db @pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) def test_read_iris_query_expression_with_parameter(conn, request): conn = request.getfixturevalue(conn) @@ -696,7 +781,6 @@ def test_read_iris_query_expression_with_parameter(conn, request): autoload_con.dispose() -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_read_iris_query_string_with_parameter(conn, request, sql_strings): for db, query in 
sql_strings["read_parameters"].items(): @@ -709,7 +793,6 @@ def test_read_iris_query_string_with_parameter(conn, request, sql_strings): check_iris_frame(iris_frame) -@pytest.mark.db @pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) def test_read_iris_table(conn, request): # GH 51015 if conn = sqlite_iris_str @@ -720,7 +803,6 @@ def test_read_iris_table(conn, request): check_iris_frame(iris_frame) -@pytest.mark.db @pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) def test_read_iris_table_chunksize(conn, request): conn = request.getfixturevalue(conn) @@ -730,7 +812,6 @@ def test_read_iris_table_chunksize(conn, request): check_iris_frame(iris_frame) -@pytest.mark.db @pytest.mark.parametrize("conn", sqlalchemy_connectable) def test_to_sql_callable(conn, test_frame1, request): conn = request.getfixturevalue(conn) @@ -749,26 +830,38 @@ def sample(pd_table, conn, keys, data_iter): assert count_rows(conn, "test_frame") == len(test_frame1) -@pytest.mark.db -@pytest.mark.parametrize("conn", mysql_connectable) +@pytest.mark.parametrize("conn", all_connectable_iris) def test_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_buildin_iris": + request.node.add_marker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) + conn = request.getfixturevalue(conn) df = sql.read_sql_table("types", conn) assert issubclass(df.FloatCol.dtype.type, np.floating) assert issubclass(df.IntCol.dtype.type, np.integer) - # MySQL has no real BOOL type (it's an alias for TINYINT) - assert issubclass(df.BoolCol.dtype.type, np.integer) + # MySQL/sqlite has no real BOOL type + if "postgresql" in conn_name: + assert issubclass(df.BoolCol.dtype.type, np.bool_) + else: + assert issubclass(df.BoolCol.dtype.type, np.integer) # Int column with NA values stays as float assert issubclass(df.IntColWithNull.dtype.type, np.floating) # Bool column with NA = int column with NA values => becomes float - assert issubclass(df.BoolColWithNull.dtype.type, np.floating) + if "postgresql" in conn_name: + assert issubclass(df.BoolColWithNull.dtype.type, object) + else: + assert issubclass(df.BoolColWithNull.dtype.type, np.floating) -@pytest.mark.db @pytest.mark.parametrize("conn", mysql_connectable) def test_read_procedure(conn, request): conn = request.getfixturevalue(conn) @@ -806,7 +899,6 @@ def test_read_procedure(conn, request): tm.assert_frame_equal(df, res2) -@pytest.mark.db @pytest.mark.parametrize("conn", postgresql_connectable) @pytest.mark.parametrize("expected_count", [2, "Success!"]) def test_copy_from_callable_insertion_method(conn, expected_count, request): @@ -846,7 +938,6 @@ def psql_insert_copy(table, conn, keys, data_iter): tm.assert_frame_equal(result, expected) -@pytest.mark.db @pytest.mark.parametrize("conn", postgresql_connectable) def test_insertion_method_on_conflict_do_nothing(conn, request): # GH 15988: Example in to_sql docstring @@ -905,7 +996,6 @@ def insert_on_conflict(table, conn, keys, data_iter): pandasSQL.drop_table("test_insert_conflict") -@pytest.mark.db @pytest.mark.parametrize("conn", mysql_connectable) def test_insertion_method_on_conflict_update(conn, request): # GH 14553: Example in to_sql docstring @@ -959,7 +1049,6 @@ def insert_on_conflict(table, conn, keys, data_iter): pandasSQL.drop_table("test_insert_conflict") -@pytest.mark.db @pytest.mark.parametrize("conn", postgresql_connectable) def test_read_view_postgres(conn, request): # GH 52969 @@ -1041,217 +1130,65 @@ def 
test_execute_deprecated(sqlite_buildin_iris): sql.execute("select * from iris", sqlite_buildin_iris) -class MixInBase: - def teardown_method(self): - # if setup fails, there may not be a connection to close. - if hasattr(self, "conn"): - self.conn.close() - # use a fresh connection to ensure we can drop all tables. - try: - conn = self.connect() - except (sqlalchemy.exc.OperationalError, sqlite3.OperationalError): - pass - else: - with conn: - for view in self._get_all_views(conn): - self.drop_view(view, conn) - for tbl in self._get_all_tables(conn): - self.drop_table(tbl, conn) - - -class SQLiteMixIn(MixInBase): - def connect(self): - return sqlite3.connect(":memory:") - - def drop_table(self, table_name, conn): - conn.execute(f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}") - conn.commit() - - def _get_all_tables(self, conn): - c = conn.execute("SELECT name FROM sqlite_master WHERE type='table'") - return [table[0] for table in c.fetchall()] - - def drop_view(self, view_name, conn): - conn.execute(f"DROP VIEW IF EXISTS {sql._get_valid_sqlite_name(view_name)}") - conn.commit() - - def _get_all_views(self, conn): - c = conn.execute("SELECT name FROM sqlite_master WHERE type='view'") - return [view[0] for view in c.fetchall()] - - -class SQLAlchemyMixIn(MixInBase): - @classmethod - def teardown_class(cls): - cls.engine.dispose() - - def connect(self): - return self.engine.connect() - - def drop_table(self, table_name, conn): - if conn.in_transaction(): - conn.get_transaction().rollback() - with conn.begin(): - sql.SQLDatabase(conn).drop_table(table_name) - - def _get_all_tables(self, conn): - from sqlalchemy import inspect - - return inspect(conn).get_table_names() - - def drop_view(self, view_name, conn): - quoted_view = conn.engine.dialect.identifier_preparer.quote_identifier( - view_name - ) - if conn.in_transaction(): - conn.get_transaction().rollback() - with conn.begin(): - conn.exec_driver_sql(f"DROP VIEW IF EXISTS {quoted_view}") - - def _get_all_views(self, conn): - from sqlalchemy import inspect - - return inspect(conn).get_view_names() - - -class PandasSQLTest: - """ - Base class with common private methods for SQLAlchemy and fallback cases. 
- - """ - - def load_iris_data(self, iris_path): - self.drop_view("iris_view", self.conn) - self.drop_table("iris", self.conn) - if isinstance(self.conn, sqlite3.Connection): - create_and_load_iris_sqlite3(self.conn, iris_path) - else: - create_and_load_iris(self.conn, iris_path, self.flavor) - - def load_types_data(self, types_data): - if self.flavor != "postgresql": - for entry in types_data: - entry.pop("DateColWithTz") - if isinstance(self.conn, sqlite3.Connection): - types_data = [tuple(entry.values()) for entry in types_data] - create_and_load_types_sqlite3(self.conn, types_data) - else: - create_and_load_types(self.conn, types_data, self.flavor) - - def _read_sql_iris_parameter(self, sql_strings): - query = sql_strings["read_parameters"][self.flavor] - params = ("Iris-setosa", 5.1) - with self.pandasSQL.run_transaction(): - iris_frame = self.pandasSQL.read_query(query, params=params) - check_iris_frame(iris_frame) - - def _read_sql_iris_named_parameter(self, sql_strings): - query = sql_strings["read_named_parameters"][self.flavor] - params = {"name": "Iris-setosa", "length": 5.1} - with self.pandasSQL.run_transaction(): - iris_frame = self.pandasSQL.read_query(query, params=params) - check_iris_frame(iris_frame) - - def _read_sql_iris_no_parameter_with_percent(self, sql_strings): - query = sql_strings["read_no_parameters_with_percent"][self.flavor] - with self.pandasSQL.run_transaction(): - iris_frame = self.pandasSQL.read_query(query, params=None) - check_iris_frame(iris_frame) - - def _to_sql_empty(self, test_frame1): - self.drop_table("test_frame1", self.conn) - assert self.pandasSQL.to_sql(test_frame1.iloc[:0], "test_frame1") == 0 - - def _to_sql_with_sql_engine(self, test_frame1, engine="auto", **engine_kwargs): - """`to_sql` with the `engine` param""" - # mostly copied from this class's `_to_sql()` method - self.drop_table("test_frame1", self.conn) - - assert ( - self.pandasSQL.to_sql( - test_frame1, "test_frame1", engine=engine, **engine_kwargs - ) - == 4 - ) - assert self.pandasSQL.has_table("test_frame1") - - num_entries = len(test_frame1) - num_rows = count_rows(self.conn, "test_frame1") - assert num_rows == num_entries +@pytest.fixture +def flavor(): + def func(conn_name): + if "postgresql" in conn_name: + return "postgresql" + elif "sqlite" in conn_name: + return "sqlite" + elif "mysql" in conn_name: + return "mysql" - # Nuke table - self.drop_table("test_frame1", self.conn) + raise ValueError(f"unsupported connection: {conn_name}") - def _roundtrip(self, test_frame1): - self.drop_table("test_frame_roundtrip", self.conn) - assert self.pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4 - with self.pandasSQL.run_transaction(): - result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip") + return func - result.set_index("level_0", inplace=True) - # result.index.astype(int) - result.index.name = None +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_parameter(conn, request, sql_strings, flavor): + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_parameters"][flavor(conn_name)] + params = ("Iris-setosa", 5.1) + pandasSQL = pandasSQL_builder(conn) - tm.assert_frame_equal(result, test_frame1) + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) - def _execute_sql(self): - # drop_sql = "DROP TABLE IF EXISTS test" # should already be done - iris_results = self.pandasSQL.execute("SELECT * FROM iris") - row = 
iris_results.fetchone() - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) - def _to_sql_save_index(self): - df = DataFrame.from_records( - [(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"] - ) - assert self.pandasSQL.to_sql(df, "test_to_sql_saves_index") == 2 - ix_cols = self._get_index_columns("test_to_sql_saves_index") - assert ix_cols == [["A"]] - - def _transaction_test(self): - with self.pandasSQL.run_transaction() as trans: - stmt = "CREATE TABLE test_trans (A INT, B TEXT)" - if isinstance(self.pandasSQL, SQLiteDatabase): - trans.execute(stmt) - else: - from sqlalchemy import text +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_named_parameter(conn, request, sql_strings, flavor): + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_named_parameters"][flavor(conn_name)] + params = {"name": "Iris-setosa", "length": 5.1} + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) - stmt = text(stmt) - trans.execute(stmt) - class DummyException(Exception): - pass +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_no_parameter_with_percent(conn, request, sql_strings, flavor): + if "mysql" in conn or "postgresql" in conn: + request.node.add_marker(pytest.mark.xfail(reason="broken test")) - # Make sure when transaction is rolled back, no rows get inserted - ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')" - if isinstance(self.pandasSQL, SQLDatabase): - from sqlalchemy import text + conn_name = conn + conn = request.getfixturevalue(conn) - ins_sql = text(ins_sql) - try: - with self.pandasSQL.run_transaction() as trans: - trans.execute(ins_sql) - raise DummyException("error") - except DummyException: - # ignore raised exception - pass - with self.pandasSQL.run_transaction(): - res = self.pandasSQL.read_query("SELECT * FROM test_trans") - assert len(res) == 0 - - # Make sure when transaction is committed, rows do get inserted - with self.pandasSQL.run_transaction() as trans: - trans.execute(ins_sql) - res2 = self.pandasSQL.read_query("SELECT * FROM test_trans") - assert len(res2) == 1 + query = sql_strings["read_no_parameters_with_percent"][flavor(conn_name)] + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=None) + check_iris_frame(iris_frame) # ----------------------------------------------------------------------------- # -- Testing the public API -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_api_read_sql_view(conn, request): conn = request.getfixturevalue(conn) @@ -1259,7 +1196,6 @@ def test_api_read_sql_view(conn, request): check_iris_frame(iris_frame) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_api_read_sql_with_chunksize_no_result(conn, request): conn = request.getfixturevalue(conn) @@ -1269,7 +1205,6 @@ def test_api_read_sql_with_chunksize_no_result(conn, request): tm.assert_frame_equal(concat(with_batch), without_batch) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1281,7 +1216,6 @@ def test_api_to_sql(conn, request, test_frame1): assert sql.has_table("test_frame1", conn) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_fail(conn, request, test_frame1): 
conn = request.getfixturevalue(conn) @@ -1297,7 +1231,6 @@ def test_api_to_sql_fail(conn, request, test_frame1): sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_replace(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1316,7 +1249,6 @@ def test_api_to_sql_replace(conn, request, test_frame1): assert num_rows == num_entries -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_append(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1336,7 +1268,6 @@ def test_api_to_sql_append(conn, request, test_frame1): assert num_rows == num_entries -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_type_mapping(conn, request, test_frame3): conn = request.getfixturevalue(conn) @@ -1350,7 +1281,6 @@ def test_api_to_sql_type_mapping(conn, request, test_frame3): tm.assert_frame_equal(test_frame3, result) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_series(conn, request): conn = request.getfixturevalue(conn) @@ -1364,7 +1294,6 @@ def test_api_to_sql_series(conn, request): tm.assert_frame_equal(s.to_frame(), s2) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_roundtrip(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1383,7 +1312,6 @@ def test_api_roundtrip(conn, request, test_frame1): tm.assert_frame_equal(result, test_frame1) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_roundtrip_chunksize(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1402,7 +1330,6 @@ def test_api_roundtrip_chunksize(conn, request, test_frame1): tm.assert_frame_equal(result, test_frame1) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable_iris) def test_api_execute_sql(conn, request): # drop_sql = "DROP TABLE IF EXISTS test" # should already be done @@ -1413,13 +1340,9 @@ def test_api_execute_sql(conn, request): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) -@pytest.mark.db -@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("conn", all_connectable_iris) def test_api_date_parsing(conn, request): conn_name = conn - if conn_name in {"sqlite_buildin", "sqlite_str"}: - pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture") - conn = request.getfixturevalue(conn) # Test date parsing in read_sql # No Parsing @@ -1473,8 +1396,7 @@ def test_api_date_parsing(conn, request): ] -@pytest.mark.db -@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("conn", all_connectable_iris) @pytest.mark.parametrize("error", ["ignore", "raise", "coerce"]) @pytest.mark.parametrize( "read_sql, text, mode", @@ -1493,10 +1415,11 @@ def test_api_custom_dateparsing_error( conn, request, read_sql, text, mode, error, types_data_frame ): conn_name = conn - if conn_name in {"sqlite_buildin", "sqlite_str"}: - pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture") - conn = request.getfixturevalue(conn) + if text == "types" and conn_name == "sqlite_buildin_iris": + request.node.add_marker( + pytest.mark.xfail(reason="failing combination of arguments") + ) expected = types_data_frame.astype({"DateCol": "datetime64[ns]"}) @@ -1516,14 +1439,9 @@ def test_api_custom_dateparsing_error( tm.assert_frame_equal(result, expected) -@pytest.mark.db -@pytest.mark.parametrize("conn", all_connectable) 
+@pytest.mark.parametrize("conn", all_connectable_iris) def test_api_date_and_index(conn, request): # Test case where same column appears in parse_date and index_col - conn_name = conn - if conn_name in {"sqlite_buildin", "sqlite_str"}: - pytest.skip("types tables not created in sqlite_buildin or sqlite_str fixture") - conn = request.getfixturevalue(conn) df = sql.read_sql_query( "SELECT * FROM types", @@ -1536,7 +1454,6 @@ def test_api_date_and_index(conn, request): assert issubclass(df.IntDateCol.dtype.type, np.datetime64) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_timedelta(conn, request): # see #6921 @@ -1553,7 +1470,6 @@ def test_api_timedelta(conn, request): tm.assert_series_equal(result["foo"], df["foo"].view("int64")) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_complex_raises(conn, request): conn = request.getfixturevalue(conn) @@ -1563,7 +1479,6 @@ def test_api_complex_raises(conn, request): assert df.to_sql("test_complex", con=conn) is None -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize( "index_name,index_label,expected", @@ -1596,13 +1511,14 @@ def test_api_to_sql_index_label(conn, request, index_name, index_label, expected assert frame.columns[0] == expected -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_to_sql_index_label_multiindex(conn, request): conn_name = conn if "mysql" in conn_name: request.node.add_marker( - pytest.mark.xfail(reason="MySQL can fail using TEXT without length as key") + pytest.mark.xfail( + reason="MySQL can fail using TEXT without length as key", strict=False + ) ) conn = request.getfixturevalue(conn) @@ -1665,7 +1581,6 @@ def test_api_to_sql_index_label_multiindex(conn, request): ) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_multiindex_roundtrip(conn, request): conn = request.getfixturevalue(conn) @@ -1686,7 +1601,6 @@ def test_api_multiindex_roundtrip(conn, request): tm.assert_frame_equal(df, result, check_index_type=True) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) @pytest.mark.parametrize( "dtype", @@ -1719,7 +1633,6 @@ def test_api_dtype_argument(conn, request, dtype): tm.assert_frame_equal(result, expected) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_integer_col_names(conn, request): conn = request.getfixturevalue(conn) @@ -1727,7 +1640,6 @@ def test_api_integer_col_names(conn, request): sql.to_sql(df, "test_frame_integer_col_names", conn, if_exists="replace") -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_get_schema(conn, request, test_frame1): conn = request.getfixturevalue(conn) @@ -1735,7 +1647,6 @@ def test_api_get_schema(conn, request, test_frame1): assert "CREATE" in create_sql -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_get_schema_with_schema(conn, request, test_frame1): # GH28486 @@ -1744,7 +1655,6 @@ def test_api_get_schema_with_schema(conn, request, test_frame1): assert "CREATE TABLE pypi." 
in create_sql -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_get_schema_dtypes(conn, request): conn_name = conn @@ -1762,7 +1672,6 @@ def test_api_get_schema_dtypes(conn, request): assert "INTEGER" in create_sql -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_get_schema_keys(conn, request, test_frame1): conn_name = conn @@ -1785,7 +1694,6 @@ def test_api_get_schema_keys(conn, request, test_frame1): assert constraint_sentence in create_sql -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_chunksize_read(conn, request): conn_name = conn @@ -1831,7 +1739,6 @@ def test_api_chunksize_read(conn, request): tm.assert_frame_equal(res1, res3) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_categorical(conn, request): # GH8624 @@ -1856,7 +1763,6 @@ def test_api_categorical(conn, request): tm.assert_frame_equal(res, df) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_unicode_column_name(conn, request): # GH 11431 @@ -1869,7 +1775,6 @@ def test_api_unicode_column_name(conn, request): df.to_sql(name="test_unicode", con=conn, index=False) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_escaped_table_name(conn, request): # GH 13206 @@ -1891,7 +1796,6 @@ def test_api_escaped_table_name(conn, request): tm.assert_frame_equal(res, df) -@pytest.mark.db @pytest.mark.parametrize("conn", all_connectable) def test_api_read_sql_duplicate_columns(conn, request): # GH#53117 @@ -1911,1102 +1815,1352 @@ def test_api_read_sql_duplicate_columns(conn, request): tm.assert_frame_equal(result, expected) -class _TestSQLApi(PandasSQLTest): - """ - Base class to test the public API. - - From this two classes are derived to run these tests for both the - sqlalchemy mode (`TestSQLApi`) and the fallback mode - (`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific - tests for the different sql flavours are included in `_TestSQLAlchemy`. - - Notes: - flavor can always be passed even in SQLAlchemy mode, - should be correctly ignored. 
+@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_columns(conn, request, test_frame1): + # test columns argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.node.add_marker(pytest.mark.xfail(reason="Not Implemented")) - we don't use drop_table because that isn't part of the public api + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) - """ + cols = ["A", "B"] - flavor = "sqlite" - mode: str + result = sql.read_sql_table("test_frame", conn, columns=cols) + assert result.columns.tolist() == cols - @pytest.fixture(autouse=True) - def setup_method(self, iris_path, types_data): - self.conn = self.connect() - self.load_iris_data(iris_path) - self.load_types_data(types_data) - self.load_test_data_and_sql() - def load_test_data_and_sql(self): - create_and_load_iris_view(self.conn) +@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_index_col(conn, request, test_frame1): + # test columns argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.node.add_marker(pytest.mark.xfail(reason="Not Implemented")) + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) -@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed") -class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi): - """ - Test the public API as it would be used directly + result = sql.read_sql_table("test_frame", conn, index_col="index") + assert result.index.names == ["index"] - Tests for `read_sql_table` are included here, as this is specific for the - sqlalchemy mode. + result = sql.read_sql_table("test_frame", conn, index_col=["A", "B"]) + assert result.index.names == ["A", "B"] - """ + result = sql.read_sql_table( + "test_frame", conn, index_col=["A", "B"], columns=["C", "D"] + ) + assert result.index.names == ["A", "B"] + assert result.columns.tolist() == ["C", "D"] - flavor = "sqlite" - mode = "sqlalchemy" - @classmethod - def setup_class(cls): - cls.engine = sqlalchemy.create_engine("sqlite:///:memory:") +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_delegate(conn, request): + if conn == "sqlite_buildin_iris": + request.node.add_marker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) - def test_read_table_columns(self, test_frame1): - # test columns argument in read_table - sql.to_sql(test_frame1, "test_frame", self.conn) + conn = request.getfixturevalue(conn) + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) - cols = ["A", "B"] - result = sql.read_sql_table("test_frame", self.conn, columns=cols) - assert result.columns.tolist() == cols + iris_frame1 = sql.read_sql_table("iris", conn) + iris_frame2 = sql.read_sql("iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) - def test_read_table_index_col(self, test_frame1): - # test columns argument in read_table - sql.to_sql(test_frame1, "test_frame", self.conn) - result = sql.read_sql_table("test_frame", self.conn, index_col="index") - assert result.index.names == ["index"] +def test_not_reflect_all_tables(sqlite_conn): + conn = sqlite_conn + from sqlalchemy import text + from sqlalchemy.engine import Engine - result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"]) - assert result.index.names == ["A", "B"] + # create invalid table + query_list = [ + text("CREATE TABLE invalid (x 
INTEGER, y UNKNOWN);"), + text("CREATE TABLE other_table (x INTEGER, y INTEGER);"), + ] - result = sql.read_sql_table( - "test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"] - ) - assert result.index.names == ["A", "B"] - assert result.columns.tolist() == ["C", "D"] + for query in query_list: + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + conn.execute(query) + else: + with conn.begin(): + conn.execute(query) - def test_read_sql_delegate(self): - iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn) - iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn) - tm.assert_frame_equal(iris_frame1, iris_frame2) + with tm.assert_produces_warning(None): + sql.read_sql_table("other_table", conn) + sql.read_sql_query("SELECT * FROM other_table", conn) - iris_frame1 = sql.read_sql_table("iris", self.conn) - iris_frame2 = sql.read_sql("iris", self.conn) - tm.assert_frame_equal(iris_frame1, iris_frame2) - def test_not_reflect_all_tables(self): - from sqlalchemy import text - from sqlalchemy.engine import Engine +@pytest.mark.parametrize("conn", all_connectable) +def test_warning_case_insensitive_table_name(conn, request, test_frame1): + conn_name = conn + if conn_name == "sqlite_buildin": + request.node.add_marker(pytest.mark.xfail(reason="Does not raise warning")) - # create invalid table - query_list = [ - text("CREATE TABLE invalid (x INTEGER, y UNKNOWN);"), - text("CREATE TABLE other_table (x INTEGER, y INTEGER);"), - ] - for query in query_list: - if isinstance(self.conn, Engine): - with self.conn.connect() as conn: - with conn.begin(): - conn.execute(query) - else: - with self.conn.begin(): - self.conn.execute(query) + conn = request.getfixturevalue(conn) + # see gh-7815 + with tm.assert_produces_warning( + UserWarning, + match=( + r"The provided table name 'TABLE1' is not found exactly as such in " + r"the database after writing the table, possibly due to case " + r"sensitivity issues. Consider using lower case table names." + ), + ): + sql.SQLDatabase(conn).check_case_sensitive("TABLE1", "") - with tm.assert_produces_warning(None): - sql.read_sql_table("other_table", self.conn) - sql.read_sql_query("SELECT * FROM other_table", self.conn) + # Test that the warning is certainly NOT triggered in a normal case. + with tm.assert_produces_warning(None): + test_frame1.to_sql(name="CaseSensitive", con=conn) - def test_warning_case_insensitive_table_name(self, test_frame1): - # see gh-7815 - with tm.assert_produces_warning( - UserWarning, - match=( - r"The provided table name 'TABLE1' is not found exactly as such in " - r"the database after writing the table, possibly due to case " - r"sensitivity issues. Consider using lower case table names." - ), - ): - sql.SQLDatabase(self.conn).check_case_sensitive("TABLE1", "") - # Test that the warning is certainly NOT triggered in a normal case. 
- with tm.assert_produces_warning(None): - test_frame1.to_sql(name="CaseSensitive", con=self.conn) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_sqlalchemy_type_mapping(conn, request): + conn = request.getfixturevalue(conn) + from sqlalchemy import TIMESTAMP - def _get_index_columns(self, tbl_name): - from sqlalchemy.engine import reflection + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + df = DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + db = sql.SQLDatabase(conn) + table = sql.SQLTable("test_type", db, frame=df) + # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones + assert isinstance(table.table.c["time"].type, TIMESTAMP) - insp = reflection.Inspector.from_engine(self.conn) - ixs = insp.get_indexes("test_index_saved") - ixs = [i["column_names"] for i in ixs] - return ixs - def test_sqlalchemy_type_mapping(self): - from sqlalchemy import TIMESTAMP +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "integer, expected", + [ + ("int8", "SMALLINT"), + ("Int8", "SMALLINT"), + ("uint8", "SMALLINT"), + ("UInt8", "SMALLINT"), + ("int16", "SMALLINT"), + ("Int16", "SMALLINT"), + ("uint16", "INTEGER"), + ("UInt16", "INTEGER"), + ("int32", "INTEGER"), + ("Int32", "INTEGER"), + ("uint32", "BIGINT"), + ("UInt32", "BIGINT"), + ("int64", "BIGINT"), + ("Int64", "BIGINT"), + (int, "BIGINT" if np.dtype(int).name == "int64" else "INTEGER"), + ], +) +def test_sqlalchemy_integer_mapping(conn, request, integer, expected): + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + conn = request.getfixturevalue(conn) + df = DataFrame([0, 1], columns=["a"], dtype=integer) + db = sql.SQLDatabase(conn) + table = sql.SQLTable("test_type", db, frame=df) - # Test Timestamp objects (no datetime64 because of timezone) (GH9085) - df = DataFrame( - {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} - ) - db = sql.SQLDatabase(self.conn) - table = sql.SQLTable("test_type", db, frame=df) - # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones - assert isinstance(table.table.c["time"].type, TIMESTAMP) + result = str(table.table.c.a.type) + assert result == expected - @pytest.mark.parametrize( - "integer, expected", - [ - ("int8", "SMALLINT"), - ("Int8", "SMALLINT"), - ("uint8", "SMALLINT"), - ("UInt8", "SMALLINT"), - ("int16", "SMALLINT"), - ("Int16", "SMALLINT"), - ("uint16", "INTEGER"), - ("UInt16", "INTEGER"), - ("int32", "INTEGER"), - ("Int32", "INTEGER"), - ("uint32", "BIGINT"), - ("UInt32", "BIGINT"), - ("int64", "BIGINT"), - ("Int64", "BIGINT"), - (int, "BIGINT" if np.dtype(int).name == "int64" else "INTEGER"), - ], - ) - def test_sqlalchemy_integer_mapping(self, integer, expected): - # GH35076 Map pandas integer to optimal SQLAlchemy integer type - df = DataFrame([0, 1], columns=["a"], dtype=integer) - db = sql.SQLDatabase(self.conn) - table = sql.SQLTable("test_type", db, frame=df) - - result = str(table.table.c.a.type) - assert result == expected - - @pytest.mark.parametrize("integer", ["uint64", "UInt64"]) - def test_sqlalchemy_integer_overload_mapping(self, integer): - # GH35076 Map pandas integer to optimal SQLAlchemy integer type - df = DataFrame([0, 1], columns=["a"], dtype=integer) - db = sql.SQLDatabase(self.conn) - with pytest.raises( - ValueError, match="Unsigned 64 bit integer datatype is not supported" - ): - sql.SQLTable("test_type", db, frame=df) - - def test_database_uri_string(self, test_frame1): - # 
Test read_sql and .to_sql method with a database URI (GH10654) - # db_uri = 'sqlite:///:memory:' # raises - # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near - # "iris": syntax error [SQL: 'iris'] - with tm.ensure_clean() as name: - db_uri = "sqlite:///" + name - table = "iris" - test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False) - test_frame2 = sql.read_sql(table, db_uri) - test_frame3 = sql.read_sql_table(table, db_uri) - query = "SELECT * FROM iris" - test_frame4 = sql.read_sql_query(query, db_uri) - tm.assert_frame_equal(test_frame1, test_frame2) - tm.assert_frame_equal(test_frame1, test_frame3) - tm.assert_frame_equal(test_frame1, test_frame4) - - @td.skip_if_installed("pg8000") - def test_pg8000_sqlalchemy_passthrough_error(self): - # using driver that will not be installed on CI to trigger error - # in sqlalchemy.create_engine -> test passing of this error to user - db_uri = "postgresql+pg8000://user:pass@host/dbname" - with pytest.raises(ImportError, match="pg8000"): - sql.read_sql("select * from table", db_uri) - - def test_query_by_text_obj(self): - # WIP : GH10846 - from sqlalchemy import text +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize("integer", ["uint64", "UInt64"]) +def test_sqlalchemy_integer_overload_mapping(conn, request, integer): + conn = request.getfixturevalue(conn) + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + df = DataFrame([0, 1], columns=["a"], dtype=integer) + db = sql.SQLDatabase(conn) + with pytest.raises( + ValueError, match="Unsigned 64 bit integer datatype is not supported" + ): + sql.SQLTable("test_type", db, frame=df) + + +@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="fails without SQLAlchemy") +@pytest.mark.parametrize("conn", all_connectable) +def test_database_uri_string(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + # Test read_sql and .to_sql method with a database URI (GH10654) + # db_uri = 'sqlite:///:memory:' # raises + # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near + # "iris": syntax error [SQL: 'iris'] + with tm.ensure_clean() as name: + db_uri = "sqlite:///" + name + table = "iris" + test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False) + test_frame2 = sql.read_sql(table, db_uri) + test_frame3 = sql.read_sql_table(table, db_uri) + query = "SELECT * FROM iris" + test_frame4 = sql.read_sql_query(query, db_uri) + tm.assert_frame_equal(test_frame1, test_frame2) + tm.assert_frame_equal(test_frame1, test_frame3) + tm.assert_frame_equal(test_frame1, test_frame4) + + +@td.skip_if_installed("pg8000") +@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="fails without SQLAlchemy") +@pytest.mark.parametrize("conn", all_connectable) +def test_pg8000_sqlalchemy_passthrough_error(conn, request): + conn = request.getfixturevalue(conn) + # using driver that will not be installed on CI to trigger error + # in sqlalchemy.create_engine -> test passing of this error to user + db_uri = "postgresql+pg8000://user:pass@host/dbname" + with pytest.raises(ImportError, match="pg8000"): + sql.read_sql("select * from table", db_uri) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_query_by_text_obj(conn, request): + # WIP : GH10846 + conn_name = conn + conn = request.getfixturevalue(conn) + from sqlalchemy import text + + if "postgres" in conn_name: + name_text = text('select * from iris where "Name"=:name') + else: name_text = text("select * from iris where name=:name") 
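+    # the branch above quotes "Name" only for Postgres, which folds unquoted
+    # identifiers to lower case; the other dialects accept the bare column name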
-        iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
-        all_names = set(iris_df["Name"])
-        assert all_names == {"Iris-versicolor"}
-
-    def test_query_by_select_obj(self):
-        # WIP : GH10846
-        from sqlalchemy import (
-            bindparam,
-            select,
-        )
+    iris_df = sql.read_sql(name_text, conn, params={"name": "Iris-versicolor"})
+    all_names = set(iris_df["Name"])
+    assert all_names == {"Iris-versicolor"}

-        iris = iris_table_metadata(self.flavor)
-        name_select = select(iris).where(iris.c.Name == bindparam("name"))
-        iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
-        all_names = set(iris_df["Name"])
-        assert all_names == {"Iris-setosa"}

-    def test_column_with_percentage(self):
-        # GH 37157
-        df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]})
-        df.to_sql(name="test_column_percentage", con=self.conn, index=False)
+@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris)
+def test_query_by_select_obj(conn, request):
+    conn = request.getfixturevalue(conn)
+    # WIP : GH10846
+    from sqlalchemy import (
+        bindparam,
+        select,
+    )

-        res = sql.read_sql_table("test_column_percentage", self.conn)
+    iris = iris_table_metadata("postgres")
+    name_select = select(iris).where(iris.c.Name == bindparam("name"))
+    iris_df = sql.read_sql(name_select, conn, params={"name": "Iris-setosa"})
+    all_names = set(iris_df["Name"])
+    assert all_names == {"Iris-setosa"}

-        tm.assert_frame_equal(res, df)

+@pytest.mark.parametrize("conn", all_connectable)
+def test_column_with_percentage(conn, request):
+    # GH 37157
+    conn_name = conn
+    if conn_name == "sqlite_buildin":
+        request.node.add_marker(pytest.mark.xfail(reason="Not Implemented"))

-class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
-    """
-    Test the public sqlite connection fallback API
+    conn = request.getfixturevalue(conn)
+    df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]})
+    df.to_sql(name="test_column_percentage", con=conn, index=False)

-    """
+    res = sql.read_sql_table("test_column_percentage", conn)

-    flavor = "sqlite"
-    mode = "fallback"
+    tm.assert_frame_equal(res, df)

-    def connect(self, database=":memory:"):
-        return sqlite3.connect(database)

-    def test_sql_open_close(self, test_frame3):
-        # Test if the IO in the database still work if the connection closed
-        # between the writing and reading (as in many real situations).
+def test_sql_open_close(test_frame3):
+    # Test that the IO in the database still works if the connection is closed
+    # between the writing and reading (as in many real situations).
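+    # the plain sqlite3.connect() below bypasses SQLAlchemy entirely, so this
+    # also covers pandas' DBAPI fallback path against a file-backed database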
- with tm.ensure_clean() as name: - with closing(self.connect(name)) as conn: - assert ( - sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) - == 4 - ) + with tm.ensure_clean() as name: + with closing(sqlite3.connect(name)) as conn: + assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4 - with closing(self.connect(name)) as conn: - result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn) + with closing(sqlite3.connect(name)) as conn: + result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn) - tm.assert_frame_equal(test_frame3, result) + tm.assert_frame_equal(test_frame3, result) - @pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed") - def test_con_string_import_error(self): - conn = "mysql://root@localhost/pandas" - msg = "Using URI string without sqlalchemy installed" - with pytest.raises(ImportError, match=msg): - sql.read_sql("SELECT * FROM iris", conn) - @pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed") - def test_con_unknown_dbapi2_class_does_not_error_without_sql_alchemy_installed( - self, - ): - class MockSqliteConnection: - def __init__(self, *args, **kwargs) -> None: - self.conn = sqlite3.Connection(*args, **kwargs) - - def __getattr__(self, name): - return getattr(self.conn, name) - - def close(self): - self.conn.close() - - with contextlib.closing(MockSqliteConnection(":memory:")) as conn: - with tm.assert_produces_warning(UserWarning): - sql.read_sql("SELECT 1", conn) - - def test_read_sql_delegate(self): - iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn) - iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn) - tm.assert_frame_equal(iris_frame1, iris_frame2) - - msg = "Execution failed on sql 'iris': near \"iris\": syntax error" - with pytest.raises(sql.DatabaseError, match=msg): - sql.read_sql("iris", self.conn) - - def test_get_schema2(self, test_frame1): - # without providing a connection object (available for backwards comp) - create_sql = sql.get_schema(test_frame1, "test") - assert "CREATE" in create_sql - - def _get_sqlite_column_type(self, schema, column): - for col in schema.split("\n"): - if col.split()[0].strip('"') == column: - return col.split()[1] - raise ValueError(f"Column {column} not found") - - def test_sqlite_type_mapping(self): - # Test Timestamp objects (no datetime64 because of timezone) (GH9085) - df = DataFrame( - {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} - ) - db = sql.SQLiteDatabase(self.conn) - table = sql.SQLiteTable("test_type", db, frame=df) - schema = table.sql_schema() - assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP" +@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed") +def test_con_string_import_error(): + conn = "mysql://root@localhost/pandas" + msg = "Using URI string without sqlalchemy installed" + with pytest.raises(ImportError, match=msg): + sql.read_sql("SELECT * FROM iris", conn) -# ----------------------------------------------------------------------------- -# -- Database flavor specific tests +@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed") +def test_con_unknown_dbapi2_class_does_not_error_without_sql_alchemy_installed(): + class MockSqliteConnection: + def __init__(self, *args, **kwargs) -> None: + self.conn = sqlite3.Connection(*args, **kwargs) + def __getattr__(self, name): + return getattr(self.conn, name) -@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed") 
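+            # __getattr__ above forwards any other attribute access to the
+            # real sqlite3 connection, so the mock still quacks like a DBAPI2
+            # connection without subclassing sqlite3.Connection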
-class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): - """ - Base class for testing the sqlalchemy backend. + def close(self): + self.conn.close() - Subclasses for specific database types are created below. Tests that - deviate for each flavor are overwritten there. + with contextlib.closing(MockSqliteConnection(":memory:")) as conn: + with tm.assert_produces_warning(UserWarning): + sql.read_sql("SELECT 1", conn) - """ - flavor: str +def test_sqlite_read_sql_delegate(sqlite_buildin_iris): + conn = sqlite_buildin_iris + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) - @classmethod - def setup_class(cls): - cls.setup_driver() - cls.setup_engine() + msg = "Execution failed on sql 'iris': near \"iris\": syntax error" + with pytest.raises(sql.DatabaseError, match=msg): + sql.read_sql("iris", conn) - @pytest.fixture(autouse=True) - def setup_method(self, iris_path, types_data): - try: - self.conn = self.engine.connect() - self.pandasSQL = sql.SQLDatabase(self.conn) - except sqlalchemy.exc.OperationalError: - pytest.skip(f"Can't connect to {self.flavor} server") - self.load_iris_data(iris_path) - self.load_types_data(types_data) - @classmethod - def setup_driver(cls): - raise NotImplementedError() +def test_get_schema2(test_frame1): + # without providing a connection object (available for backwards comp) + create_sql = sql.get_schema(test_frame1, "test") + assert "CREATE" in create_sql - @classmethod - def setup_engine(cls): - raise NotImplementedError() - def test_read_sql_parameter(self, sql_strings): - self._read_sql_iris_parameter(sql_strings) +def test_sqlite_type_mapping(sqlite_buildin): + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + conn = sqlite_buildin + df = DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + db = sql.SQLiteDatabase(conn) + table = sql.SQLiteTable("test_type", db, frame=df) + schema = table.sql_schema() + for col in schema.split("\n"): + if col.split()[0].strip('"') == "time": + assert col.split()[1] == "TIMESTAMP" - def test_read_sql_named_parameter(self, sql_strings): - self._read_sql_iris_named_parameter(sql_strings) - def test_to_sql_empty(self, test_frame1): - self._to_sql_empty(test_frame1) +# ----------------------------------------------------------------------------- +# -- Database flavor specific tests - def test_create_table(self): - from sqlalchemy import inspect - temp_conn = self.connect() - temp_frame = DataFrame( - {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]} - ) - with sql.SQLDatabase(temp_conn, need_transaction=True) as pandasSQL: - assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_create_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") - insp = inspect(temp_conn) - assert insp.has_table("temp_frame") + conn = request.getfixturevalue(conn) - # Cleanup - with sql.SQLDatabase(temp_conn, need_transaction=True) as pandasSQL: - pandasSQL.drop_table("temp_frame") + from sqlalchemy import inspect - def test_drop_table(self): - from sqlalchemy import inspect + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 - temp_conn = self.connect() - temp_frame = DataFrame( - {"one": [1.0, 
2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]} - ) - pandasSQL = sql.SQLDatabase(temp_conn) + insp = inspect(conn) + assert insp.has_table("temp_frame") + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("temp_frame") + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_drop_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import inspect + + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + pandasSQL = sql.SQLDatabase(conn) + with pandasSQL.run_transaction(): assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 - insp = inspect(temp_conn) - assert insp.has_table("temp_frame") + insp = inspect(conn) + assert insp.has_table("temp_frame") + with pandasSQL.run_transaction(): pandasSQL.drop_table("temp_frame") - try: - insp.clear_cache() # needed with SQLAlchemy 2.0, unavailable prior - except AttributeError: - pass - assert not insp.has_table("temp_frame") + try: + insp.clear_cache() # needed with SQLAlchemy 2.0, unavailable prior + except AttributeError: + pass + assert not insp.has_table("temp_frame") - def test_roundtrip(self, test_frame1): - self._roundtrip(test_frame1) - def test_execute_sql(self): - self._execute_sql() +@pytest.mark.parametrize("conn", all_connectable) +def test_roundtrip(conn, request, test_frame1): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") - def test_read_table(self): - iris_frame = sql.read_sql_table("iris", con=self.conn) - check_iris_frame(iris_frame) + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4 + result = pandasSQL.read_query("SELECT * FROM test_frame_roundtrip") - def test_read_table_columns(self): - iris_frame = sql.read_sql_table( - "iris", con=self.conn, columns=["SepalLength", "SepalLength"] - ) - tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"]) + result.set_index("level_0", inplace=True) + # result.index.astype(int) - def test_read_table_absent_raises(self): - msg = "Table this_doesnt_exist not found" - with pytest.raises(ValueError, match=msg): - sql.read_sql_table("this_doesnt_exist", con=self.conn) + result.index.name = None - def test_default_type_conversion(self): - df = sql.read_sql_table("types", self.conn) + tm.assert_frame_equal(result, test_frame1) - assert issubclass(df.FloatCol.dtype.type, np.floating) - assert issubclass(df.IntCol.dtype.type, np.integer) - assert issubclass(df.BoolCol.dtype.type, np.bool_) - # Int column with NA values stays as float - assert issubclass(df.IntColWithNull.dtype.type, np.floating) - # Bool column with NA values becomes object - assert issubclass(df.BoolColWithNull.dtype.type, object) +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_execute_sql(conn, request): + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + iris_results = pandasSQL.execute("SELECT * FROM iris") + row = iris_results.fetchone() + tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) - def test_bigint(self): - # int64 should be converted to BigInteger, GH7433 - df = DataFrame(data={"i64": [2**62]}) - assert df.to_sql(name="test_bigint", con=self.conn, index=False) == 1 - result = sql.read_sql_table("test_bigint", self.conn) - - 
tm.assert_frame_equal(df, result) - - def test_default_date_load(self): - df = sql.read_sql_table("types", self.conn) - - # IMPORTANT - sqlite has no native date type, so shouldn't parse, but - # MySQL SHOULD be converted. - assert issubclass(df.DateCol.dtype.type, np.datetime64) - - def test_datetime_with_timezone(self, request): - # edge case that converts postgresql datetime with time zone types - # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok - # but should be more natural, so coerce to datetime64[ns] for now - - def check(col): - # check that a column is either datetime64[ns] - # or datetime64[ns, UTC] - if lib.is_np_dtype(col.dtype, "M"): - # "2000-01-01 00:00:00-08:00" should convert to - # "2000-01-01 08:00:00" - assert col[0] == Timestamp("2000-01-01 08:00:00") - - # "2000-06-01 00:00:00-07:00" should convert to - # "2000-06-01 07:00:00" - assert col[1] == Timestamp("2000-06-01 07:00:00") - - elif isinstance(col.dtype, DatetimeTZDtype): - assert str(col.dt.tz) == "UTC" - - # "2000-01-01 00:00:00-08:00" should convert to - # "2000-01-01 08:00:00" - # "2000-06-01 00:00:00-07:00" should convert to - # "2000-06-01 07:00:00" - # GH 6415 - expected_data = [ - Timestamp("2000-01-01 08:00:00", tz="UTC"), - Timestamp("2000-06-01 07:00:00", tz="UTC"), - ] - expected = Series(expected_data, name=col.name) - tm.assert_series_equal(col, expected) - else: - raise AssertionError( - f"DateCol loaded with incorrect type -> {col.dtype}" - ) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table("iris", con=conn) + check_iris_frame(iris_frame) - # GH11216 - df = read_sql_query("select * from types", self.conn) - if not hasattr(df, "DateColWithTz"): - request.node.add_marker( - pytest.mark.xfail(reason="no column with datetime with time zone") - ) - # this is parsed on Travis (linux), but not on macosx for some reason - # even with the same versions of psycopg2 & sqlalchemy, possibly a - # Postgresql server version difference - col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table_columns(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table( + "iris", con=conn, columns=["SepalLength", "SepalLength"] + ) + tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"]) - df = read_sql_query( - "select * from types", self.conn, parse_dates=["DateColWithTz"] - ) - if not hasattr(df, "DateColWithTz"): - request.node.add_marker( - pytest.mark.xfail(reason="no column with datetime with time zone") - ) - col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - assert str(col.dt.tz) == "UTC" - check(df.DateColWithTz) - - df = concat( - list(read_sql_query("select * from types", self.conn, chunksize=1)), - ignore_index=True, - ) - col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - assert str(col.dt.tz) == "UTC" - expected = sql.read_sql_table("types", self.conn) - col = expected.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz) - - # xref #7139 - # this might or might not be converted depending on the postgres driver - df = sql.read_sql_table("types", self.conn) - check(df.DateColWithTz) - - def test_datetime_with_timezone_roundtrip(self): - # GH 9086 - # Write datetimetz data to a db and 
read it back - # For dbs that support timestamps with timezones, should get back UTC - # otherwise naive data should be returned - expected = DataFrame( - {"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")} + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_table_absent_raises(conn, request): + conn = request.getfixturevalue(conn) + msg = "Table this_doesnt_exist not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("this_doesnt_exist", con=conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_sqlalchemy_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "mysql" in conn_name or "sqlite" in conn_name: + request.node.add_marker( + pytest.mark.xfail(reason="boolean dtype not inferred properly") ) - assert expected.to_sql(name="test_datetime_tz", con=self.conn, index=False) == 3 - if self.flavor == "postgresql": - # SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC - expected["A"] = expected["A"].dt.tz_convert("UTC") - else: - # Otherwise, timestamps are returned as local, naive - expected["A"] = expected["A"].dt.tz_localize(None) + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) - result = sql.read_sql_table("test_datetime_tz", self.conn) - tm.assert_frame_equal(result, expected) + assert issubclass(df.FloatCol.dtype.type, np.floating) + assert issubclass(df.IntCol.dtype.type, np.integer) + assert issubclass(df.BoolCol.dtype.type, np.bool_) - result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn) - if self.flavor == "sqlite": - # read_sql_query does not return datetime type like read_sql_table - assert isinstance(result.loc[0, "A"], str) - result["A"] = to_datetime(result["A"]) - tm.assert_frame_equal(result, expected) + # Int column with NA values stays as float + assert issubclass(df.IntColWithNull.dtype.type, np.floating) + # Bool column with NA values becomes object + assert issubclass(df.BoolColWithNull.dtype.type, object) - def test_out_of_bounds_datetime(self): - # GH 26761 - data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0]) - assert data.to_sql(name="test_datetime_obb", con=self.conn, index=False) == 1 - result = sql.read_sql_table("test_datetime_obb", self.conn) - expected = DataFrame([pd.NaT], columns=["date"]) - tm.assert_frame_equal(result, expected) - def test_naive_datetimeindex_roundtrip(self): - # GH 23510 - # Ensure that a naive DatetimeIndex isn't converted to UTC - dates = date_range("2018-01-01", periods=5, freq="6H")._with_freq(None) - expected = DataFrame({"nums": range(5)}, index=dates) - assert ( - expected.to_sql(name="foo_table", con=self.conn, index_label="info_date") - == 5 - ) - result = sql.read_sql_table("foo_table", self.conn, index_col="info_date") - # result index with gain a name from a set_index operation; expected - tm.assert_frame_equal(result, expected, check_names=False) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_bigint(conn, request): + # int64 should be converted to BigInteger, GH7433 + conn = request.getfixturevalue(conn) + df = DataFrame(data={"i64": [2**62]}) + assert df.to_sql(name="test_bigint", con=conn, index=False) == 1 + result = sql.read_sql_table("test_bigint", conn) - def test_date_parsing(self): - # No Parsing - df = sql.read_sql_table("types", self.conn) - expected_type = object if self.flavor == "sqlite" else np.datetime64 - assert 
issubclass(df.DateCol.dtype.type, expected_type) + tm.assert_frame_equal(df, result) - df = sql.read_sql_table("types", self.conn, parse_dates=["DateCol"]) - assert issubclass(df.DateCol.dtype.type, np.datetime64) - df = sql.read_sql_table( - "types", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"} +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_default_date_load(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "sqlite" in conn_name: + request.node.add_marker( + pytest.mark.xfail(reason="sqlite does not read date properly") ) - assert issubclass(df.DateCol.dtype.type, np.datetime64) - df = sql.read_sql_table( - "types", - self.conn, - parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}}, - ) - assert issubclass(df.DateCol.dtype.type, np.datetime64) + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.DateCol.dtype.type, np.datetime64) - df = sql.read_sql_table("types", self.conn, parse_dates=["IntDateCol"]) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) - df = sql.read_sql_table("types", self.conn, parse_dates={"IntDateCol": "s"}) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_datetime_with_timezone(conn, request): + # edge case that converts postgresql datetime with time zone types + # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok + # but should be more natural, so coerce to datetime64[ns] for now + + def check(col): + # check that a column is either datetime64[ns] + # or datetime64[ns, UTC] + if lib.is_np_dtype(col.dtype, "M"): + # "2000-01-01 00:00:00-08:00" should convert to + # "2000-01-01 08:00:00" + assert col[0] == Timestamp("2000-01-01 08:00:00") + + # "2000-06-01 00:00:00-07:00" should convert to + # "2000-06-01 07:00:00" + assert col[1] == Timestamp("2000-06-01 07:00:00") + + elif isinstance(col.dtype, DatetimeTZDtype): + assert str(col.dt.tz) == "UTC" + + # "2000-01-01 00:00:00-08:00" should convert to + # "2000-01-01 08:00:00" + # "2000-06-01 00:00:00-07:00" should convert to + # "2000-06-01 07:00:00" + # GH 6415 + expected_data = [ + Timestamp("2000-01-01 08:00:00", tz="UTC"), + Timestamp("2000-06-01 07:00:00", tz="UTC"), + ] + expected = Series(expected_data, name=col.name) + tm.assert_series_equal(col, expected) - df = sql.read_sql_table( - "types", self.conn, parse_dates={"IntDateCol": {"unit": "s"}} + else: + raise AssertionError(f"DateCol loaded with incorrect type -> {col.dtype}") + + # GH11216 + conn = request.getfixturevalue(conn) + df = read_sql_query("select * from types", conn) + if not hasattr(df, "DateColWithTz"): + request.node.add_marker( + pytest.mark.xfail(reason="no column with datetime with time zone") ) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) - def test_datetime(self): - df = DataFrame( - {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} + # this is parsed on Travis (linux), but not on macosx for some reason + # even with the same versions of psycopg2 & sqlalchemy, possibly a + # Postgresql server version difference + col = df.DateColWithTz + assert isinstance(col.dtype, DatetimeTZDtype) + + df = read_sql_query("select * from types", conn, parse_dates=["DateColWithTz"]) + if not hasattr(df, "DateColWithTz"): + request.node.add_marker( + pytest.mark.xfail(reason="no column with datetime with time zone") ) - assert 
df.to_sql(name="test_datetime", con=self.conn) == 3 + col = df.DateColWithTz + assert isinstance(col.dtype, DatetimeTZDtype) + assert str(col.dt.tz) == "UTC" + check(df.DateColWithTz) + + df = concat( + list(read_sql_query("select * from types", conn, chunksize=1)), + ignore_index=True, + ) + col = df.DateColWithTz + assert isinstance(col.dtype, DatetimeTZDtype) + assert str(col.dt.tz) == "UTC" + expected = sql.read_sql_table("types", conn) + col = expected.DateColWithTz + assert isinstance(col.dtype, DatetimeTZDtype) + tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz) + + # xref #7139 + # this might or might not be converted depending on the postgres driver + df = sql.read_sql_table("types", conn) + check(df.DateColWithTz) - # with read_table -> type information from schema used - result = sql.read_sql_table("test_datetime", self.conn) - result = result.drop("index", axis=1) - tm.assert_frame_equal(result, df) - # with read_sql -> no type information -> sqlite has no native - result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn) - result = result.drop("index", axis=1) - if self.flavor == "sqlite": - assert isinstance(result.loc[0, "A"], str) - result["A"] = to_datetime(result["A"]) - tm.assert_frame_equal(result, df) - else: - tm.assert_frame_equal(result, df) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_with_timezone_roundtrip(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + # GH 9086 + # Write datetimetz data to a db and read it back + # For dbs that support timestamps with timezones, should get back UTC + # otherwise naive data should be returned + expected = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")} + ) + assert expected.to_sql(name="test_datetime_tz", con=conn, index=False) == 3 - def test_datetime_NaT(self): - df = DataFrame( - {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} - ) - df.loc[1, "A"] = np.nan - assert df.to_sql(name="test_datetime", con=self.conn, index=False) == 3 + if "postgresql" in conn_name: + # SQLAlchemy "timezones" (i.e. 
offsets) are coerced to UTC + expected["A"] = expected["A"].dt.tz_convert("UTC") + else: + # Otherwise, timestamps are returned as local, naive + expected["A"] = expected["A"].dt.tz_localize(None) - # with read_table -> type information from schema used - result = sql.read_sql_table("test_datetime", self.conn) - tm.assert_frame_equal(result, df) + result = sql.read_sql_table("test_datetime_tz", conn) + tm.assert_frame_equal(result, expected) - # with read_sql -> no type information -> sqlite has no native - result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn) - if self.flavor == "sqlite": - assert isinstance(result.loc[0, "A"], str) - result["A"] = to_datetime(result["A"], errors="coerce") - tm.assert_frame_equal(result, df) - else: - tm.assert_frame_equal(result, df) - - def test_datetime_date(self): - # test support for datetime.date - df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) - assert df.to_sql(name="test_date", con=self.conn, index=False) == 2 - res = read_sql_table("test_date", self.conn) - result = res["a"] - expected = to_datetime(df["a"]) - # comes back as datetime64 - tm.assert_series_equal(result, expected) - - def test_datetime_time(self, sqlite_buildin): - # test support for datetime.time - df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"]) - assert df.to_sql(name="test_time", con=self.conn, index=False) == 2 - res = read_sql_table("test_time", self.conn) - tm.assert_frame_equal(res, df) - - # GH8341 - # first, use the fallback to have the sqlite adapter put in place - sqlite_conn = sqlite_buildin - assert sql.to_sql(df, "test_time2", sqlite_conn, index=False) == 2 - res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn) - ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) - tm.assert_frame_equal(ref, res) # check if adapter is in place - # then test if sqlalchemy is unaffected by the sqlite adapter - assert sql.to_sql(df, "test_time3", self.conn, index=False) == 2 - if self.flavor == "sqlite": - res = sql.read_sql_query("SELECT * FROM test_time3", self.conn) - ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) - tm.assert_frame_equal(ref, res) - res = sql.read_sql_table("test_time3", self.conn) - tm.assert_frame_equal(df, res) - - def test_mixed_dtype_insert(self): - # see GH6509 - s1 = Series(2**25 + 1, dtype=np.int32) - s2 = Series(0.0, dtype=np.float32) - df = DataFrame({"s1": s1, "s2": s2}) - - # write and read again - assert df.to_sql(name="test_read_write", con=self.conn, index=False) == 1 - df2 = sql.read_sql_table("test_read_write", self.conn) - - tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) - - def test_nan_numeric(self): - # NaNs in numeric float column - df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) - assert df.to_sql(name="test_nan", con=self.conn, index=False) == 3 - - # with read_table - result = sql.read_sql_table("test_nan", self.conn) - tm.assert_frame_equal(result, df) + result = sql.read_sql_query("SELECT * FROM test_datetime_tz", conn) + if "sqlite" in conn_name: + # read_sql_query does not return datetime type like read_sql_table + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, expected) - # with read_sql - result = sql.read_sql_query("SELECT * FROM test_nan", self.conn) - tm.assert_frame_equal(result, df) - def test_nan_fullcolumn(self): - # full NaN column (numeric float column) - df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]}) - assert df.to_sql(name="test_nan", 
con=self.conn, index=False) == 3 +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_out_of_bounds_datetime(conn, request): + # GH 26761 + conn = request.getfixturevalue(conn) + data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0]) + assert data.to_sql(name="test_datetime_obb", con=conn, index=False) == 1 + result = sql.read_sql_table("test_datetime_obb", conn) + expected = DataFrame([pd.NaT], columns=["date"]) + tm.assert_frame_equal(result, expected) - # with read_table - result = sql.read_sql_table("test_nan", self.conn) - tm.assert_frame_equal(result, df) - # with read_sql -> not type info from table -> stays None - df["B"] = df["B"].astype("object") - df["B"] = None - result = sql.read_sql_query("SELECT * FROM test_nan", self.conn) - tm.assert_frame_equal(result, df) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_naive_datetimeindex_roundtrip(conn, request): + # GH 23510 + # Ensure that a naive DatetimeIndex isn't converted to UTC + conn = request.getfixturevalue(conn) + dates = date_range("2018-01-01", periods=5, freq="6H")._with_freq(None) + expected = DataFrame({"nums": range(5)}, index=dates) + assert expected.to_sql(name="foo_table", con=conn, index_label="info_date") == 5 + result = sql.read_sql_table("foo_table", conn, index_col="info_date") + # result index with gain a name from a set_index operation; expected + tm.assert_frame_equal(result, expected, check_names=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_date_parsing(conn, request): + # No Parsing + conn_name = conn + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + expected_type = object if "sqlite" in conn_name else np.datetime64 + assert issubclass(df.DateCol.dtype.type, expected_type) + + df = sql.read_sql_table("types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}) + assert issubclass(df.DateCol.dtype.type, np.datetime64) - def test_nan_string(self): - # NaNs in string column - df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]}) - assert df.to_sql(name="test_nan", con=self.conn, index=False) == 3 + df = sql.read_sql_table( + "types", + conn, + parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": "s"}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": {"unit": "s"}}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) - # NaNs are coming back as None - df.loc[2, "B"] = None - # with read_table - result = sql.read_sql_table("test_nan", self.conn) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} + ) + assert df.to_sql(name="test_datetime", con=conn) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + result = result.drop("index", axis=1) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = 
sql.read_sql_query("SELECT * FROM test_datetime", conn) + result = result.drop("index", axis=1) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, df) + else: tm.assert_frame_equal(result, df) - # with read_sql - result = sql.read_sql_query("SELECT * FROM test_nan", self.conn) + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_NaT(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} + ) + df.loc[1, "A"] = np.nan + assert df.to_sql(name="test_datetime", con=conn, index=False) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = sql.read_sql_query("SELECT * FROM test_datetime", conn) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"], errors="coerce") + tm.assert_frame_equal(result, df) + else: tm.assert_frame_equal(result, df) - def _get_index_columns(self, tbl_name): + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_date(conn, request): + # test support for datetime.date + conn = request.getfixturevalue(conn) + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_table("test_date", conn) + result = res["a"] + expected = to_datetime(df["a"]) + # comes back as datetime64 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_time(conn, request, sqlite_buildin): + # test support for datetime.time + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"]) + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_table("test_time", conn) + tm.assert_frame_equal(res, df) + + # GH8341 + # first, use the fallback to have the sqlite adapter put in place + sqlite_conn = sqlite_buildin + assert sql.to_sql(df, "test_time2", sqlite_conn, index=False) == 2 + res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) # check if adapter is in place + # then test if sqlalchemy is unaffected by the sqlite adapter + assert sql.to_sql(df, "test_time3", conn, index=False) == 2 + if "sqlite" in conn_name: + res = sql.read_sql_query("SELECT * FROM test_time3", conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) + res = sql.read_sql_table("test_time3", conn) + tm.assert_frame_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_mixed_dtype_insert(conn, request): + # see GH6509 + conn = request.getfixturevalue(conn) + s1 = Series(2**25 + 1, dtype=np.int32) + s2 = Series(0.0, dtype=np.float32) + df = DataFrame({"s1": s1, "s2": s2}) + + # write and read again + assert df.to_sql(name="test_read_write", con=conn, index=False) == 1 + df2 = sql.read_sql_table("test_read_write", conn) + + tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_numeric(conn, request): + # NaNs in numeric float column + conn = 
request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_fullcolumn(conn, request): + # full NaN column (numeric float column) + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> not type info from table -> stays None + df["B"] = df["B"].astype("object") + df["B"] = None + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_string(conn, request): + # NaNs in string column + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # NaNs are coming back as None + df.loc[2, "B"] = None + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_save_index(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame.from_records( + [(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"] + ) + + pandasSQL = pandasSQL_builder(conn) + tbl_name = "test_to_sql_saves_index" + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(df, tbl_name) == 2 + + if conn_name in {"sqlite_buildin", "sqlite_str"}: + ixs = sql.read_sql_query( + "SELECT * FROM sqlite_master WHERE type = 'index' " + f"AND tbl_name = '{tbl_name}'", + conn, + ) + ix_cols = [] + for ix_name in ixs.name: + ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", conn) + ix_cols.append(ix_info.name.tolist()) + else: from sqlalchemy import inspect - insp = inspect(self.conn) + insp = inspect(conn) ixs = insp.get_indexes(tbl_name) - ixs = [i["column_names"] for i in ixs] - return ixs + ix_cols = [i["column_names"] for i in ixs] + + assert ix_cols == [["A"]] - def test_to_sql_save_index(self): - self._to_sql_save_index() - def test_transactions(self): - self._transaction_test() +@pytest.mark.parametrize("conn", all_connectable) +def test_transactions(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) - def test_get_schema_create_table(self, test_frame3): - # Use a dataframe without a bool column, since MySQL converts bool to - # TINYINT (which read_sql_table returns as an int and causes a dtype - # mismatch) + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + pandasSQL = pandasSQL_builder(conn) + if conn_name != "sqlite_buildin": from sqlalchemy import text - from sqlalchemy.engine import Engine - tbl = "test_get_schema_create_table" - create_sql = sql.get_schema(test_frame3, tbl, con=self.conn) - blank_test_df = test_frame3.iloc[:0] + stmt = text(stmt) - self.drop_table(tbl, self.conn) - create_sql = text(create_sql) - if isinstance(self.conn, Engine): - with 
self.conn.connect() as conn: - with conn.begin(): - conn.execute(create_sql) + with pandasSQL.run_transaction() as trans: + trans.execute(stmt) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_transaction_rollback(conn, request): + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction() as trans: + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + if isinstance(pandasSQL, SQLiteDatabase): + trans.execute(stmt) else: - with self.conn.begin(): - self.conn.execute(create_sql) - returned_df = sql.read_sql_table(tbl, self.conn) - tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False) - self.drop_table(tbl, self.conn) - - def test_dtype(self): - from sqlalchemy import ( - TEXT, - String, - ) - from sqlalchemy.schema import MetaData - - cols = ["A", "B"] - data = [(0.8, True), (0.9, None)] - df = DataFrame(data, columns=cols) - assert df.to_sql(name="dtype_test", con=self.conn) == 2 - assert df.to_sql(name="dtype_test2", con=self.conn, dtype={"B": TEXT}) == 2 - meta = MetaData() - meta.reflect(bind=self.conn) - sqltype = meta.tables["dtype_test2"].columns["B"].type - assert isinstance(sqltype, TEXT) - msg = "The type of B is not a SQLAlchemy type" - with pytest.raises(ValueError, match=msg): - df.to_sql(name="error", con=self.conn, dtype={"B": str}) + from sqlalchemy import text - # GH9083 - assert ( - df.to_sql(name="dtype_test3", con=self.conn, dtype={"B": String(10)}) == 2 - ) - meta.reflect(bind=self.conn) - sqltype = meta.tables["dtype_test3"].columns["B"].type - assert isinstance(sqltype, String) - assert sqltype.length == 10 - - # single dtype - assert df.to_sql(name="single_dtype_test", con=self.conn, dtype=TEXT) == 2 - meta.reflect(bind=self.conn) - sqltypea = meta.tables["single_dtype_test"].columns["A"].type - sqltypeb = meta.tables["single_dtype_test"].columns["B"].type - assert isinstance(sqltypea, TEXT) - assert isinstance(sqltypeb, TEXT) - - def test_notna_dtype(self): - from sqlalchemy import ( - Boolean, - DateTime, - Float, - Integer, + stmt = text(stmt) + trans.execute(stmt) + + class DummyException(Exception): + pass + + # Make sure when transaction is rolled back, no rows get inserted + ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')" + if isinstance(pandasSQL, SQLDatabase): + from sqlalchemy import text + + ins_sql = text(ins_sql) + try: + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + raise DummyException("error") + except DummyException: + # ignore raised exception + pass + with pandasSQL.run_transaction(): + res = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res) == 0 + + # Make sure when transaction is committed, rows do get inserted + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + res2 = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res2) == 1 + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_get_schema_create_table(conn, request, test_frame3): + # Use a dataframe without a bool column, since MySQL converts bool to + # TINYINT (which read_sql_table returns as an int and causes a dtype + # mismatch) + if conn == "sqlite_str": + request.node.add_marker( + pytest.mark.xfail(reason="test does not support sqlite_str fixture") ) - from sqlalchemy.schema import MetaData - cols = { - "Bool": Series([True, None]), - "Date": Series([datetime(2012, 5, 1), None]), - "Int": Series([1, None], dtype="object"), - "Float": Series([1.1, None]), + conn = request.getfixturevalue(conn) + + 
from sqlalchemy import text + from sqlalchemy.engine import Engine + + tbl = "test_get_schema_create_table" + create_sql = sql.get_schema(test_frame3, tbl, con=conn) + blank_test_df = test_frame3.iloc[:0] + + create_sql = text(create_sql) + if isinstance(conn, Engine): + with conn.connect() as newcon: + with newcon.begin(): + newcon.execute(create_sql) + else: + conn.execute(create_sql) + returned_df = sql.read_sql_table(tbl, conn) + tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + TEXT, + String, + ) + from sqlalchemy.schema import MetaData + + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": TEXT}) == 2 + meta = MetaData() + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test2"].columns["B"].type + assert isinstance(sqltype, TEXT) + msg = "The type of B is not a SQLAlchemy type" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": str}) + + # GH9083 + assert df.to_sql(name="dtype_test3", con=conn, dtype={"B": String(10)}) == 2 + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test3"].columns["B"].type + assert isinstance(sqltype, String) + assert sqltype.length == 10 + + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype=TEXT) == 2 + meta.reflect(bind=conn) + sqltypea = meta.tables["single_dtype_test"].columns["A"].type + sqltypeb = meta.tables["single_dtype_test"].columns["B"].type + assert isinstance(sqltypea, TEXT) + assert isinstance(sqltypeb, TEXT) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_notna_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn_name = conn + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Boolean, + DateTime, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) + + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 + _ = sql.read_sql_table(tbl, conn) + meta = MetaData() + meta.reflect(bind=conn) + my_type = Integer if "mysql" in conn_name else Boolean + col_dict = meta.tables[tbl].columns + assert isinstance(col_dict["Bool"].type, my_type) + assert isinstance(col_dict["Date"].type, DateTime) + assert isinstance(col_dict["Int"].type, Integer) + assert isinstance(col_dict["Float"].type, Float) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_double_precision(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + BigInteger, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + V = 1.23456789101112131415 + + df = DataFrame( + { + "f32": Series([V], dtype="float32"), + "f64": Series([V], dtype="float64"), + "f64_as_f32": Series([V], dtype="float64"), + "i32": Series([5], dtype="int32"), + "i64": Series([5], dtype="int64"), } - df = DataFrame(cols) - - tbl = "notna_dtype_test" - assert 
df.to_sql(name=tbl, con=self.conn) == 2 - _ = sql.read_sql_table(tbl, self.conn) - meta = MetaData() - meta.reflect(bind=self.conn) - my_type = Integer if self.flavor == "mysql" else Boolean - col_dict = meta.tables[tbl].columns - assert isinstance(col_dict["Bool"].type, my_type) - assert isinstance(col_dict["Date"].type, DateTime) - assert isinstance(col_dict["Int"].type, Integer) - assert isinstance(col_dict["Float"].type, Float) - - def test_double_precision(self): - from sqlalchemy import ( - BigInteger, - Float, - Integer, + ) + + assert ( + df.to_sql( + name="test_dtypes", + con=conn, + index=False, + if_exists="replace", + dtype={"f64_as_f32": Float(precision=23)}, ) - from sqlalchemy.schema import MetaData + == 1 + ) + res = sql.read_sql_table("test_dtypes", conn) - V = 1.23456789101112131415 + # check precision of float64 + assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14) - df = DataFrame( - { - "f32": Series([V], dtype="float32"), - "f64": Series([V], dtype="float64"), - "f64_as_f32": Series([V], dtype="float64"), - "i32": Series([5], dtype="int32"), - "i64": Series([5], dtype="int64"), - } - ) + # check sql types + meta = MetaData() + meta.reflect(bind=conn) + col_dict = meta.tables["test_dtypes"].columns + assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type) + assert isinstance(col_dict["f32"].type, Float) + assert isinstance(col_dict["f64"].type, Float) + assert isinstance(col_dict["i32"].type, Integer) + assert isinstance(col_dict["i64"].type, BigInteger) - assert ( - df.to_sql( - name="test_dtypes", - con=self.conn, - index=False, - if_exists="replace", - dtype={"f64_as_f32": Float(precision=23)}, - ) - == 1 - ) - res = sql.read_sql_table("test_dtypes", self.conn) - - # check precision of float64 - assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14) - - # check sql types - meta = MetaData() - meta.reflect(bind=self.conn) - col_dict = meta.tables["test_dtypes"].columns - assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type) - assert isinstance(col_dict["f32"].type, Float) - assert isinstance(col_dict["f64"].type, Float) - assert isinstance(col_dict["i32"].type, Integer) - assert isinstance(col_dict["i64"].type, BigInteger) - - def test_connectable_issue_example(self): - # This tests the example raised in issue - # https://github.com/pandas-dev/pandas/issues/10104 - from sqlalchemy.engine import Engine - def test_select(connection): - query = "SELECT test_foo_data FROM test_foo_data" - return sql.read_sql_query(query, con=connection) +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_connectable_issue_example(conn, request): + conn = request.getfixturevalue(conn) - def test_append(connection, data): - data.to_sql(name="test_foo_data", con=connection, if_exists="append") + # This tests the example raised in issue + # https://github.com/pandas-dev/pandas/issues/10104 + from sqlalchemy.engine import Engine - def test_connectable(conn): - # https://github.com/sqlalchemy/sqlalchemy/commit/ - # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973 - foo_data = test_select(conn) - test_append(conn, foo_data) + def test_select(connection): + query = "SELECT test_foo_data FROM test_foo_data" + return sql.read_sql_query(query, con=connection) - def main(connectable): - if isinstance(connectable, Engine): - with connectable.connect() as conn: - with conn.begin(): - test_connectable(conn) - else: - test_connectable(connectable) + def test_append(connection, data): + 
data.to_sql(name="test_foo_data", con=connection, if_exists="append") - assert ( - DataFrame({"test_foo_data": [0, 1, 2]}).to_sql( - name="test_foo_data", con=self.conn - ) - == 3 - ) - main(self.conn) - - @pytest.mark.parametrize( - "input", - [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], - ) - def test_to_sql_with_negative_npinf(self, input, request): - # GH 34431 - - df = DataFrame(input) - - if self.flavor == "mysql": - # GH 36465 - # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error - # for pymysql version >= 0.10 - # TODO(GH#36465): remove this version check after GH 36465 is fixed - pymysql = pytest.importorskip("pymysql") - - if ( - Version(pymysql.__version__) < Version("1.0.3") - and "infe0" in df.columns - ): - mark = pytest.mark.xfail(reason="GH 36465") - request.node.add_marker(mark) - - msg = "inf cannot be used with MySQL" - with pytest.raises(ValueError, match=msg): - df.to_sql(name="foobar", con=self.conn, index=False) + def test_connectable(conn): + # https://github.com/sqlalchemy/sqlalchemy/commit/ + # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973 + foo_data = test_select(conn) + test_append(conn, foo_data) + + def main(connectable): + if isinstance(connectable, Engine): + with connectable.connect() as conn: + with conn.begin(): + test_connectable(conn) else: - assert df.to_sql(name="foobar", con=self.conn, index=False) == 1 - res = sql.read_sql_table("foobar", self.conn) - tm.assert_equal(df, res) - - def test_temporary_table(self): - from sqlalchemy import ( - Column, - Integer, - Unicode, - select, - ) - from sqlalchemy.orm import ( - Session, - declarative_base, - ) + test_connectable(connectable) + + assert ( + DataFrame({"test_foo_data": [0, 1, 2]}).to_sql(name="test_foo_data", con=conn) + == 3 + ) + main(conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "input", + [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], +) +def test_to_sql_with_negative_npinf(conn, request, input): + # GH 34431 + + df = DataFrame(input) + conn_name = conn + conn = request.getfixturevalue(conn) + + if "mysql" in conn_name: + # GH 36465 + # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error + # for pymysql version >= 0.10 + # TODO(GH#36465): remove this version check after GH 36465 is fixed + pymysql = pytest.importorskip("pymysql") - test_data = "Hello, World!" 
- expected = DataFrame({"spam": [test_data]}) - Base = declarative_base() - - class Temporary(Base): - __tablename__ = "temp_test" - __table_args__ = {"prefixes": ["TEMPORARY"]} - id = Column(Integer, primary_key=True) - spam = Column(Unicode(30), nullable=False) - - with Session(self.conn) as session: - with session.begin(): - conn = session.connection() - Temporary.__table__.create(conn) - session.add(Temporary(spam=test_data)) - session.flush() - df = sql.read_sql_query(sql=select(Temporary.spam), con=conn) - tm.assert_frame_equal(df, expected) - - # -- SQL Engine tests (in the base class for now) - def test_invalid_engine(self, test_frame1): - msg = "engine must be one of 'auto', 'sqlalchemy'" + if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns: + mark = pytest.mark.xfail(reason="GH 36465") + request.node.add_marker(mark) + + msg = "inf cannot be used with MySQL" with pytest.raises(ValueError, match=msg): - self._to_sql_with_sql_engine(test_frame1, "bad_engine") + df.to_sql(name="foobar", con=conn, index=False) + else: + assert df.to_sql(name="foobar", con=conn, index=False) == 1 + res = sql.read_sql_table("foobar", conn) + tm.assert_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_temporary_table(conn, request): + if conn == "sqlite_str": + pytest.skip("test does not work with str connection") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Column, + Integer, + Unicode, + select, + ) + from sqlalchemy.orm import ( + Session, + declarative_base, + ) + + test_data = "Hello, World!" + expected = DataFrame({"spam": [test_data]}) + Base = declarative_base() + + class Temporary(Base): + __tablename__ = "temp_test" + __table_args__ = {"prefixes": ["TEMPORARY"]} + id = Column(Integer, primary_key=True) + spam = Column(Unicode(30), nullable=False) + + with Session(conn) as session: + with session.begin(): + conn = session.connection() + Temporary.__table__.create(conn) + session.add(Temporary(spam=test_data)) + session.flush() + df = sql.read_sql_query(sql=select(Temporary.spam), con=conn) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_invalid_engine(conn, request, test_frame1): + if conn == "sqlite_buildin": + request.node.add_marker( + pytest.mark.xfail(reason="SQLiteDatabase does not raise for bad engine") + ) + + conn = request.getfixturevalue(conn) + msg = "engine must be one of 'auto', 'sqlalchemy'" + pandasSQL = pandasSQL_builder(conn) + with pytest.raises(ValueError, match=msg): + pandasSQL.to_sql(test_frame1, "test_frame1", engine="bad_engine") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_with_sql_engine(conn, request, test_frame1): + """`to_sql` with the `engine` param""" + # mostly copied from this class's `_to_sql()` method + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1", engine="auto") == 4 + assert pandasSQL.has_table("test_frame1") - def test_options_sqlalchemy(self, test_frame1): - # use the set option - with pd.option_context("io.sql.engine", "sqlalchemy"): - self._to_sql_with_sql_engine(test_frame1) + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_options_sqlalchemy(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + 
with pd.option_context("io.sql.engine", "sqlalchemy"): + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") - def test_options_auto(self, test_frame1): - # use the set option - with pd.option_context("io.sql.engine", "auto"): - self._to_sql_with_sql_engine(test_frame1) + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_options_auto(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + with pd.option_context("io.sql.engine", "auto"): + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries - def test_options_get_engine(self): + +@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="fails without SQLAlchemy") +def test_options_get_engine(): + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "sqlalchemy"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) - with pd.option_context("io.sql.engine", "sqlalchemy"): - assert isinstance(get_engine("auto"), SQLAlchemyEngine) - assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + with pd.option_context("io.sql.engine", "auto"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) - with pd.option_context("io.sql.engine", "auto"): - assert isinstance(get_engine("auto"), SQLAlchemyEngine) - assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) - def test_get_engine_auto_error_message(self): - # Expect different error messages from get_engine(engine="auto") - # if engines aren't installed vs. are installed but bad version - pass - # TODO(GH#36893) fill this in when we add more engines - - @pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) - def test_read_sql_dtype_backend(self, string_storage, func, dtype_backend): - # GH#50048 - table = "test" - df = self.dtype_backend_data() - df.to_sql(name=table, con=self.conn, index=False, if_exists="replace") - - with pd.option_context("mode.string_storage", string_storage): - result = getattr(pd, func)( - f"Select * from {table}", self.conn, dtype_backend=dtype_backend +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. 
are installed but bad version + pass + # TODO(GH#36893) fill this in when we add more engines + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype_backend( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)( + f"Select * from {table}", conn, dtype_backend=dtype_backend + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + f"Select * from {table}", + con=conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table"]) +def test_read_sql_dtype_backend_table( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + if "sqlite" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason=( + "SQLite actually returns proper boolean values via " + "read_sql_table, but before pytest refactor was skipped" + ) ) - expected = self.dtype_backend_expected(string_storage, dtype_backend) - tm.assert_frame_equal(result, expected) + ) + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)(table, conn, dtype_backend=dtype_backend) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + table, + conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table", "read_sql_query"]) +def test_read_sql_invalid_dtype_backend_table(conn, request, func, dtype_backend_data): + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
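For reference alongside the error message being constructed here: only the two named backends are valid values for ``dtype_backend``. A minimal sketch of a valid call, assuming an in-memory SQLite engine (table name illustrative; "pyarrow" works the same way when pyarrow is installed):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")
pd.DataFrame({"a": [1, 2, 3]}).to_sql(name="test", con=engine, index=False)

# Only "numpy_nullable" and "pyarrow" are accepted; anything else raises
# the ValueError matched in the test above.
result = pd.read_sql("Select * from test", engine, dtype_backend="numpy_nullable")
print(result.dtypes)  # integer columns come back as nullable Int64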
+ ) + with pytest.raises(ValueError, match=msg): + getattr(pd, func)(table, conn, dtype_backend="numpy") - with pd.option_context("mode.string_storage", string_storage): - iterator = getattr(pd, func)( - f"Select * from {table}", - con=self.conn, - dtype_backend=dtype_backend, - chunksize=3, - ) - expected = self.dtype_backend_expected(string_storage, dtype_backend) - for result in iterator: - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("func", ["read_sql", "read_sql_table"]) - def test_read_sql_dtype_backend_table(self, string_storage, func, dtype_backend): - # GH#50048 - table = "test" - df = self.dtype_backend_data() - df.to_sql(name=table, con=self.conn, index=False, if_exists="replace") - - with pd.option_context("mode.string_storage", string_storage): - result = getattr(pd, func)(table, self.conn, dtype_backend=dtype_backend) - expected = self.dtype_backend_expected(string_storage, dtype_backend) - tm.assert_frame_equal(result, expected) - with pd.option_context("mode.string_storage", string_storage): - iterator = getattr(pd, func)( - table, - self.conn, - dtype_backend=dtype_backend, - chunksize=3, - ) - expected = self.dtype_backend_expected(string_storage, dtype_backend) - for result in iterator: - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("func", ["read_sql", "read_sql_table", "read_sql_query"]) - def test_read_sql_invalid_dtype_backend_table(self, func): - table = "test" - df = self.dtype_backend_data() - df.to_sql(name=table, con=self.conn, index=False, if_exists="replace") - - msg = ( - "dtype_backend numpy is invalid, only 'numpy_nullable' and " - "'pyarrow' are allowed." - ) - with pytest.raises(ValueError, match=msg): - getattr(pd, func)(table, self.conn, dtype_backend="numpy") +@pytest.fixture +def dtype_backend_data() -> DataFrame: + return DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) - def dtype_backend_data(self) -> DataFrame: - return DataFrame( - { - "a": Series([1, np.nan, 3], dtype="Int64"), - "b": Series([1, 2, 3], dtype="Int64"), - "c": Series([1.5, np.nan, 2.5], dtype="Float64"), - "d": Series([1.5, 2.0, 2.5], dtype="Float64"), - "e": [True, False, None], - "f": [True, False, True], - "g": ["a", "b", "c"], - "h": ["a", "b", None], - } - ) - def dtype_backend_expected(self, storage, dtype_backend) -> DataFrame: +@pytest.fixture +def dtype_backend_expected(): + def func(storage, dtype_backend, conn_name): string_array: StringArray | ArrowStringArray string_array_na: StringArray | ArrowStringArray if storage == "python": @@ -3041,557 +3195,396 @@ def dtype_backend_expected(self, storage, dtype_backend) -> DataFrame: for col in df.columns } ) - return df - - def test_chunksize_empty_dtypes(self): - # GH#50245 - dtypes = {"a": "int64", "b": "object"} - df = DataFrame(columns=["a", "b"]).astype(dtypes) - expected = df.copy() - df.to_sql(name="test", con=self.conn, index=False, if_exists="replace") - - for result in read_sql_query( - "SELECT * FROM test", - self.conn, - dtype=dtypes, - chunksize=1, - ): - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("dtype_backend", [lib.no_default, "numpy_nullable"]) - @pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) - def test_read_sql_dtype(self, func, dtype_backend): - # 
GH#50797 - table = "test" - df = DataFrame({"a": [1, 2, 3], "b": 5}) - df.to_sql(name=table, con=self.conn, index=False, if_exists="replace") - - result = getattr(pd, func)( - f"Select * from {table}", - self.conn, - dtype={"a": np.float64}, - dtype_backend=dtype_backend, - ) - expected = DataFrame( - { - "a": Series([1, 2, 3], dtype=np.float64), - "b": Series( - [5, 5, 5], - dtype="int64" if not dtype_backend == "numpy_nullable" else "Int64", - ), - } - ) - tm.assert_frame_equal(result, expected) - -class TestSQLiteAlchemy(_TestSQLAlchemy): - """ - Test the sqlalchemy backend against an in-memory sqlite database. - - """ - - flavor = "sqlite" - - @classmethod - def setup_engine(cls): - cls.engine = sqlalchemy.create_engine("sqlite:///:memory:") - - @classmethod - def setup_driver(cls): - # sqlite3 is built-in - cls.driver = None + if "mysql" in conn_name or "sqlite" in conn_name: + if dtype_backend == "numpy_nullable": + df = df.astype({"e": "Int64", "f": "Int64"}) + else: + df = df.astype({"e": "int64[pyarrow]", "f": "int64[pyarrow]"}) - def test_keyword_deprecation(self): - # GH 54397 - msg = ( - "Starting with pandas version 3.0 all arguments of to_sql except for the " - "arguments 'name' and 'con' will be keyword-only." - ) - df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) - df.to_sql("example", self.conn) + return df - with tm.assert_produces_warning(FutureWarning, match=msg): - df.to_sql("example", self.conn, None, if_exists="replace") + return func - def test_default_type_conversion(self): - df = sql.read_sql_table("types", self.conn) - assert issubclass(df.FloatCol.dtype.type, np.floating) - assert issubclass(df.IntCol.dtype.type, np.integer) +@pytest.mark.parametrize("conn", all_connectable) +def test_chunksize_empty_dtypes(conn, request): + # GH#50245 + conn = request.getfixturevalue(conn) + dtypes = {"a": "int64", "b": "object"} + df = DataFrame(columns=["a", "b"]).astype(dtypes) + expected = df.copy() + df.to_sql(name="test", con=conn, index=False, if_exists="replace") - # sqlite has no boolean type, so integer type is returned - assert issubclass(df.BoolCol.dtype.type, np.integer) + for result in read_sql_query( + "SELECT * FROM test", + conn, + dtype=dtypes, + chunksize=1, + ): + tm.assert_frame_equal(result, expected) - # Int column with NA values stays as float - assert issubclass(df.IntColWithNull.dtype.type, np.floating) - # Non-native Bool column with NA values stays as float - assert issubclass(df.BoolColWithNull.dtype.type, np.floating) +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("dtype_backend", [lib.no_default, "numpy_nullable"]) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype(conn, request, func, dtype_backend): + # GH#50797 + conn = request.getfixturevalue(conn) + table = "test" + df = DataFrame({"a": [1, 2, 3], "b": 5}) + df.to_sql(name=table, con=conn, index=False, if_exists="replace") - def test_default_date_load(self): - df = sql.read_sql_table("types", self.conn) + result = getattr(pd, func)( + f"Select * from {table}", + conn, + dtype={"a": np.float64}, + dtype_backend=dtype_backend, + ) + expected = DataFrame( + { + "a": Series([1, 2, 3], dtype=np.float64), + "b": Series( + [5, 5, 5], + dtype="int64" if not dtype_backend == "numpy_nullable" else "Int64", + ), + } + ) + tm.assert_frame_equal(result, expected) - # IMPORTANT - sqlite has no native date type, so shouldn't parse, but - assert not issubclass(df.DateCol.dtype.type, np.datetime64) - def 
test_bigint_warning(self): - # test no warning for BIGINT (to support int64) is raised (GH7433) - df = DataFrame({"a": [1, 2]}, dtype="int64") - assert df.to_sql(name="test_bigintwarning", con=self.conn, index=False) == 2 +def test_keyword_deprecation(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + # GH 54397 + msg = ( + "Starting with pandas version 3.0 all arguments of to_sql except for the " + "arguments 'name' and 'con' will be keyword-only." + ) + df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) + df.to_sql("example", conn) - with tm.assert_produces_warning(None): - sql.read_sql_table("test_bigintwarning", self.conn) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_sql("example", conn, None, if_exists="replace") - def test_valueerror_exception(self): - df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) - with pytest.raises(ValueError, match="Empty table name specified"): - df.to_sql(name="", con=self.conn, if_exists="replace", index=False) - def test_row_object_is_named_tuple(self): - # GH 40682 - # Test for the is_named_tuple() function - # Placed here due to its usage of sqlalchemy +def test_bigint_warning(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + # test no warning for BIGINT (to support int64) is raised (GH7433) + df = DataFrame({"a": [1, 2]}, dtype="int64") + assert df.to_sql(name="test_bigintwarning", con=conn, index=False) == 2 - from sqlalchemy import ( - Column, - Integer, - String, - ) - from sqlalchemy.orm import ( - declarative_base, - sessionmaker, - ) + with tm.assert_produces_warning(None): + sql.read_sql_table("test_bigintwarning", conn) - BaseModel = declarative_base() - - class Test(BaseModel): - __tablename__ = "test_frame" - id = Column(Integer, primary_key=True) - string_column = Column(String(50)) - - with self.conn.begin(): - BaseModel.metadata.create_all(self.conn) - Session = sessionmaker(bind=self.conn) - with Session() as session: - df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]}) - assert ( - df.to_sql( - name="test_frame", con=self.conn, index=False, if_exists="replace" - ) - == 2 - ) - session.commit() - test_query = session.query(Test.id, Test.string_column) - df = DataFrame(test_query) - assert list(df.columns) == ["id", "string_column"] +def test_valueerror_exception(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) + with pytest.raises(ValueError, match="Empty table name specified"): + df.to_sql(name="", con=conn, if_exists="replace", index=False) - def dtype_backend_expected(self, storage, dtype_backend) -> DataFrame: - df = super().dtype_backend_expected(storage, dtype_backend) - if dtype_backend == "numpy_nullable": - df = df.astype({"e": "Int64", "f": "Int64"}) - else: - df = df.astype({"e": "int64[pyarrow]", "f": "int64[pyarrow]"}) - return df +def test_row_object_is_named_tuple(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + # GH 40682 + # Test for the is_named_tuple() function + # Placed here due to its usage of sqlalchemy - @pytest.mark.parametrize("func", ["read_sql", "read_sql_table"]) - def test_read_sql_dtype_backend_table(self, string_storage, func): - # GH#50048 Not supported for sqlite - pass + from sqlalchemy import ( + Column, + Integer, + String, + ) + from sqlalchemy.orm import ( + declarative_base, + sessionmaker, + ) - def test_read_sql_string_inference(self): - # GH#54430 - pytest.importorskip("pyarrow") - 
table = "test" - df = DataFrame({"a": ["x", "y"]}) - df.to_sql(table, con=self.conn, index=False, if_exists="replace") + BaseModel = declarative_base() - with pd.option_context("future.infer_string", True): - result = read_sql_table(table, self.conn) + class Test(BaseModel): + __tablename__ = "test_frame" + id = Column(Integer, primary_key=True) + string_column = Column(String(50)) - dtype = "string[pyarrow_numpy]" - expected = DataFrame( - {"a": ["x", "y"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + with conn.begin(): + BaseModel.metadata.create_all(conn) + Session = sessionmaker(bind=conn) + with Session() as session: + df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]}) + assert ( + df.to_sql(name="test_frame", con=conn, index=False, if_exists="replace") + == 2 ) + session.commit() + test_query = session.query(Test.id, Test.string_column) + df = DataFrame(test_query) - tm.assert_frame_equal(result, expected) - - def test_roundtripping_datetimes(self): - # GH#54877 - df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") - df.to_sql("test", self.conn, if_exists="replace", index=False) - result = pd.read_sql("select * from test", self.conn).iloc[0, 0] - assert result == "2020-12-31 12:00:00.000000" - - -@pytest.mark.db -class TestMySQLAlchemy(_TestSQLAlchemy): - """ - Test the sqlalchemy backend against an MySQL database. + assert list(df.columns) == ["id", "string_column"] - """ - flavor = "mysql" - port = 3306 +def test_read_sql_string_inference(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + # GH#54430 + pytest.importorskip("pyarrow") + table = "test" + df = DataFrame({"a": ["x", "y"]}) + df.to_sql(table, con=conn, index=False, if_exists="replace") - @classmethod - def setup_engine(cls): - cls.engine = sqlalchemy.create_engine( - f"mysql+{cls.driver}://root@localhost:{cls.port}/pandas", - connect_args=cls.connect_args, - ) + with pd.option_context("future.infer_string", True): + result = read_sql_table(table, conn) - @classmethod - def setup_driver(cls): - pymysql = pytest.importorskip("pymysql") - cls.driver = "pymysql" - cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS} + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["x", "y"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) - def test_default_type_conversion(self): - pass + tm.assert_frame_equal(result, expected) - def dtype_backend_expected(self, storage, dtype_backend) -> DataFrame: - df = super().dtype_backend_expected(storage, dtype_backend) - if dtype_backend == "numpy_nullable": - df = df.astype({"e": "Int64", "f": "Int64"}) - else: - df = df.astype({"e": "int64[pyarrow]", "f": "int64[pyarrow]"}) - return df +def test_roundtripping_datetimes(sqlite_sqlalchemy_memory_engine): + conn = sqlite_sqlalchemy_memory_engine + # GH#54877 + df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") + df.to_sql("test", conn, if_exists="replace", index=False) + result = pd.read_sql("select * from test", conn).iloc[0, 0] + assert result == "2020-12-31 12:00:00.000000" @pytest.mark.db -class TestPostgreSQLAlchemy(_TestSQLAlchemy): - """ - Test the sqlalchemy backend against an PostgreSQL database. 
- - """ - - flavor = "postgresql" - port = 5432 - - @classmethod - def setup_engine(cls): - cls.engine = sqlalchemy.create_engine( - f"postgresql+{cls.driver}://postgres:postgres@localhost:{cls.port}/pandas" - ) - - @classmethod - def setup_driver(cls): - pytest.importorskip("psycopg2") - cls.driver = "psycopg2" - - def test_schema_support(self): - from sqlalchemy.engine import Engine - - # only test this for postgresql (schema's not supported in - # mysql/sqlite) - df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) - - # create a schema - with self.conn.begin(): - self.conn.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") - self.conn.exec_driver_sql("CREATE SCHEMA other;") - - # write dataframe to different schema's - assert df.to_sql(name="test_schema_public", con=self.conn, index=False) == 2 - assert ( - df.to_sql( - name="test_schema_public_explicit", - con=self.conn, - index=False, - schema="public", - ) - == 2 - ) - assert ( - df.to_sql( - name="test_schema_other", con=self.conn, index=False, schema="other" - ) - == 2 +def test_psycopg2_schema_support(postgresql_psycopg2_engine): + conn = postgresql_psycopg2_engine + + # only test this for postgresql (schema's not supported in + # mysql/sqlite) + df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) + + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") + + # write dataframe to different schema's + assert df.to_sql(name="test_schema_public", con=conn, index=False) == 2 + assert ( + df.to_sql( + name="test_schema_public_explicit", + con=conn, + index=False, + schema="public", ) + == 2 + ) + assert ( + df.to_sql(name="test_schema_other", con=conn, index=False, schema="other") == 2 + ) - # read dataframes back in - res1 = sql.read_sql_table("test_schema_public", self.conn) - tm.assert_frame_equal(df, res1) - res2 = sql.read_sql_table("test_schema_public_explicit", self.conn) - tm.assert_frame_equal(df, res2) - res3 = sql.read_sql_table( - "test_schema_public_explicit", self.conn, schema="public" - ) - tm.assert_frame_equal(df, res3) - res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other") - tm.assert_frame_equal(df, res4) - msg = "Table test_schema_other not found" - with pytest.raises(ValueError, match=msg): - sql.read_sql_table("test_schema_other", self.conn, schema="public") + # read dataframes back in + res1 = sql.read_sql_table("test_schema_public", conn) + tm.assert_frame_equal(df, res1) + res2 = sql.read_sql_table("test_schema_public_explicit", conn) + tm.assert_frame_equal(df, res2) + res3 = sql.read_sql_table("test_schema_public_explicit", conn, schema="public") + tm.assert_frame_equal(df, res3) + res4 = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(df, res4) + msg = "Table test_schema_other not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("test_schema_other", conn, schema="public") - # different if_exists options + # different if_exists options - # create a schema - with self.conn.begin(): - self.conn.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") - self.conn.exec_driver_sql("CREATE SCHEMA other;") + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") - # write dataframe with different if_exists options - assert ( - df.to_sql( - 
name="test_schema_other", con=self.conn, schema="other", index=False - ) - == 2 - ) + # write dataframe with different if_exists options + assert ( + df.to_sql(name="test_schema_other", con=conn, schema="other", index=False) == 2 + ) + df.to_sql( + name="test_schema_other", + con=conn, + schema="other", + index=False, + if_exists="replace", + ) + assert ( df.to_sql( name="test_schema_other", - con=self.conn, + con=conn, schema="other", index=False, - if_exists="replace", - ) - assert ( - df.to_sql( - name="test_schema_other", - con=self.conn, - schema="other", - index=False, - if_exists="append", - ) - == 2 + if_exists="append", ) - res = sql.read_sql_table("test_schema_other", self.conn, schema="other") - tm.assert_frame_equal(concat([df, df], ignore_index=True), res) - - # specifying schema in user-provided meta - - # The schema won't be applied on another Connection - # because of transactional schemas - if isinstance(self.conn, Engine): - engine2 = self.connect() - pdsql = sql.SQLDatabase(engine2, schema="other") - assert pdsql.to_sql(df, "test_schema_other2", index=False) == 2 - assert ( - pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace") - == 2 - ) - assert ( - pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append") - == 2 - ) - res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other") - res2 = pdsql.read_table("test_schema_other2") - tm.assert_frame_equal(res1, res2) + == 2 + ) + res = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(concat([df, df], ignore_index=True), res) - def test_self_join_date_columns(self): - # GH 44421 - from sqlalchemy.engine import Engine - from sqlalchemy.sql import text - create_table = text( - """ - CREATE TABLE person - ( - id serial constraint person_pkey primary key, - created_dt timestamp with time zone - ); +@pytest.mark.db +def test_self_join_date_columns(postgresql_psycopg2_engine): + # GH 44421 + conn = postgresql_psycopg2_engine + from sqlalchemy.sql import text - INSERT INTO person - VALUES (1, '2021-01-01T00:00:00Z'); + create_table = text( """ - ) - if isinstance(self.conn, Engine): - with self.conn.connect() as con: - with con.begin(): - con.execute(create_table) - else: - with self.conn.begin(): - self.conn.execute(create_table) + CREATE TABLE person + ( + id serial constraint person_pkey primary key, + created_dt timestamp with time zone + ); - sql_query = ( - 'SELECT * FROM "person" AS p1 INNER JOIN "person" AS p2 ON p1.id = p2.id;' - ) - result = pd.read_sql(sql_query, self.conn) - expected = DataFrame( - [[1, Timestamp("2021", tz="UTC")] * 2], columns=["id", "created_dt"] * 2 - ) - tm.assert_frame_equal(result, expected) + INSERT INTO person + VALUES (1, '2021-01-01T00:00:00Z'); + """ + ) + with conn.connect() as con: + with con.begin(): + con.execute(create_table) - # Cleanup - with sql.SQLDatabase(self.conn, need_transaction=True) as pandasSQL: - pandasSQL.drop_table("person") + sql_query = ( + 'SELECT * FROM "person" AS p1 INNER JOIN "person" AS p2 ON p1.id = p2.id;' + ) + result = pd.read_sql(sql_query, conn) + expected = DataFrame( + [[1, Timestamp("2021", tz="UTC")] * 2], columns=["id", "created_dt"] * 2 + ) + tm.assert_frame_equal(result, expected) + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("person") -# ----------------------------------------------------------------------------- -# -- Test Sqlite / MySQL fallback +def test_create_and_drop_table(sqlite_sqlalchemy_memory_engine): 
+ conn = sqlite_sqlalchemy_memory_engine + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + pandasSQL = sql.SQLDatabase(conn) -class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest): - """ - Test the fallback mode against an in-memory sqlite database. + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(temp_frame, "drop_test_frame") == 4 - """ + assert pandasSQL.has_table("drop_test_frame") - flavor = "sqlite" + with pandasSQL.run_transaction(): + pandasSQL.drop_table("drop_test_frame") - @pytest.fixture(autouse=True) - def setup_method(self, iris_path, types_data): - self.conn = self.connect() - self.load_iris_data(iris_path) - self.load_types_data(types_data) - self.pandasSQL = sql.SQLiteDatabase(self.conn) + assert not pandasSQL.has_table("drop_test_frame") - def test_read_sql_parameter(self, sql_strings): - self._read_sql_iris_parameter(sql_strings) - def test_read_sql_named_parameter(self, sql_strings): - self._read_sql_iris_named_parameter(sql_strings) +def test_sqlite_datetime_date(sqlite_buildin): + conn = sqlite_buildin + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_date", conn) + # comes back as strings + tm.assert_frame_equal(res, df.astype(str)) - def test_to_sql_empty(self, test_frame1): - self._to_sql_empty(test_frame1) - def test_create_and_drop_table(self): - temp_frame = DataFrame( - {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]} - ) +@pytest.mark.parametrize("tz_aware", [False, True]) +def test_sqlite_datetime_time(tz_aware, sqlite_buildin): + conn = sqlite_buildin + # test support for datetime.time, GH #8341 + if not tz_aware: + tz_times = [time(9, 0, 0), time(9, 1, 30)] + else: + tz_dt = date_range("2013-01-01 09:00:00", periods=2, tz="US/Pacific") + tz_times = Series(tz_dt.to_pydatetime()).map(lambda dt: dt.timetz()) - assert self.pandasSQL.to_sql(temp_frame, "drop_test_frame") == 4 + df = DataFrame(tz_times, columns=["a"]) - assert self.pandasSQL.has_table("drop_test_frame") + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_time", conn) + # comes back as strings + expected = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(res, expected) - self.pandasSQL.drop_table("drop_test_frame") - assert not self.pandasSQL.has_table("drop_test_frame") +def get_sqlite_column_type(conn, table, column): + recs = conn.execute(f"PRAGMA table_info({table})") + for cid, name, ctype, not_null, default, pk in recs: + if name == column: + return ctype + raise ValueError(f"Table {table}, column {column} not found") - def test_roundtrip(self, test_frame1): - self._roundtrip(test_frame1) - def test_execute_sql(self): - self._execute_sql() +def test_sqlite_test_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": "STRING"}) == 2 - def test_datetime_date(self): - # test support for datetime.date - df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) - assert df.to_sql(name="test_date", con=self.conn, index=False) == 2 - res = read_sql_query("SELECT * FROM test_date", self.conn) - if self.flavor == "sqlite": - # comes back as strings - tm.assert_frame_equal(res, df.astype(str)) - elif self.flavor == "mysql": - 
tm.assert_frame_equal(res, df) + # sqlite stores Boolean values as INTEGER + assert get_sqlite_column_type(conn, "dtype_test", "B") == "INTEGER" - @pytest.mark.parametrize("tz_aware", [False, True]) - def test_datetime_time(self, tz_aware): - # test support for datetime.time, GH #8341 - if not tz_aware: - tz_times = [time(9, 0, 0), time(9, 1, 30)] - else: - tz_dt = date_range("2013-01-01 09:00:00", periods=2, tz="US/Pacific") - tz_times = Series(tz_dt.to_pydatetime()).map(lambda dt: dt.timetz()) + assert get_sqlite_column_type(conn, "dtype_test2", "B") == "STRING" + msg = r"B \(\) not a string" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": bool}) - df = DataFrame(tz_times, columns=["a"]) + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype="STRING") == 2 + assert get_sqlite_column_type(conn, "single_dtype_test", "A") == "STRING" + assert get_sqlite_column_type(conn, "single_dtype_test", "B") == "STRING" - assert df.to_sql(name="test_time", con=self.conn, index=False) == 2 - res = read_sql_query("SELECT * FROM test_time", self.conn) - if self.flavor == "sqlite": - # comes back as strings - expected = df.map(lambda _: _.strftime("%H:%M:%S.%f")) - tm.assert_frame_equal(res, expected) - def _get_index_columns(self, tbl_name): - ixs = sql.read_sql_query( - "SELECT * FROM sqlite_master WHERE type = 'index' " - f"AND tbl_name = '{tbl_name}'", - self.conn, - ) - ix_cols = [] - for ix_name in ixs.name: - ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn) - ix_cols.append(ix_info.name.tolist()) - return ix_cols - - def test_to_sql_save_index(self): - self._to_sql_save_index() - - def test_transactions(self): - self._transaction_test() - - def _get_sqlite_column_type(self, table, column): - recs = self.conn.execute(f"PRAGMA table_info({table})") - for cid, name, ctype, not_null, default, pk in recs: - if name == column: - return ctype - raise ValueError(f"Table {table}, column {column} not found") - - def test_dtype(self): - if self.flavor == "mysql": - pytest.skip("Not applicable to MySQL legacy") - cols = ["A", "B"] - data = [(0.8, True), (0.9, None)] - df = DataFrame(data, columns=cols) - assert df.to_sql(name="dtype_test", con=self.conn) == 2 - assert df.to_sql(name="dtype_test2", con=self.conn, dtype={"B": "STRING"}) == 2 - - # sqlite stores Boolean values as INTEGER - assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER" - - assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING" - msg = r"B \(\) not a string" - with pytest.raises(ValueError, match=msg): - df.to_sql(name="error", con=self.conn, dtype={"B": bool}) - - # single dtype - assert df.to_sql(name="single_dtype_test", con=self.conn, dtype="STRING") == 2 - assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING" - assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING" - - def test_notna_dtype(self): - if self.flavor == "mysql": - pytest.skip("Not applicable to MySQL legacy") - - cols = { - "Bool": Series([True, None]), - "Date": Series([datetime(2012, 5, 1), None]), - "Int": Series([1, None], dtype="object"), - "Float": Series([1.1, None]), - } - df = DataFrame(cols) +def test_sqlite_notna_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) - tbl = "notna_dtype_test" - assert df.to_sql(name=tbl, 
con=self.conn) == 2 + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 - assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER" - assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP" - assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER" - assert self._get_sqlite_column_type(tbl, "Float") == "REAL" + assert get_sqlite_column_type(conn, tbl, "Bool") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Date") == "TIMESTAMP" + assert get_sqlite_column_type(conn, tbl, "Int") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Float") == "REAL" - def test_illegal_names(self): - # For sqlite, these should work fine - df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) - msg = "Empty table or column name specified" - with pytest.raises(ValueError, match=msg): - df.to_sql(name="", con=self.conn) - - for ndx, weird_name in enumerate( - [ - "test_weird_name]", - "test_weird_name[", - "test_weird_name`", - 'test_weird_name"', - "test_weird_name'", - "_b.test_weird_name_01-30", - '"_b.test_weird_name_01-30"', - "99beginswithnumber", - "12345", - "\xe9", - ] - ): - assert df.to_sql(name=weird_name, con=self.conn) == 2 - sql.table_exists(weird_name, self.conn) +def test_sqlite_illegal_names(sqlite_buildin): + # For sqlite, these should work fine + conn = sqlite_buildin + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + + msg = "Empty table or column name specified" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="", con=conn) + + for ndx, weird_name in enumerate( + [ + "test_weird_name]", + "test_weird_name[", + "test_weird_name`", + 'test_weird_name"', + "test_weird_name'", + "_b.test_weird_name_01-30", + '"_b.test_weird_name_01-30"', + "99beginswithnumber", + "12345", + "\xe9", + ] + ): + assert df.to_sql(name=weird_name, con=conn) == 2 + sql.table_exists(weird_name, conn) - df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) - c_tbl = f"test_weird_col_name{ndx:d}" - assert df2.to_sql(name=c_tbl, con=self.conn) == 2 - sql.table_exists(c_tbl, self.conn) + df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) + c_tbl = f"test_weird_col_name{ndx:d}" + assert df2.to_sql(name=c_tbl, con=conn) == 2 + sql.table_exists(c_tbl, conn) # ----------------------------------------------------------------------------- @@ -3630,227 +3623,222 @@ def tquery(query, con=None): return None if res is None else list(res) -class TestXSQLite: - def drop_table(self, table_name, conn): - cur = conn.cursor() - cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}") - conn.commit() +def test_xsqlite_basic(sqlite_buildin): + frame = tm.makeTimeDataFrame() + assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 30 + result = sql.read_sql("select * from test_table", sqlite_buildin) - def test_basic(self, sqlite_buildin): - frame = tm.makeTimeDataFrame() - assert ( - sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 30 - ) - result = sql.read_sql("select * from test_table", sqlite_buildin) + # HACK! Change this once indexes are handled properly. + result.index = frame.index - # HACK! Change this once indexes are handled properly. 
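The "weird name" cases above work because pandas quotes SQLite identifiers internally. A sketch of the equivalent manual escaping, using only the stdlib driver (names illustrative; the doubling of embedded double quotes is standard SQLite identifier escaping):

import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
weird = 'test_weird_name"'
assert pd.DataFrame({"a": [1, 2]}).to_sql(name=weird, con=conn) == 2

# Escape the identifier by doubling any embedded double quotes before
# interpolating it into raw SQL.
quoted = '"' + weird.replace('"', '""') + '"'
conn.execute(f"DROP TABLE IF EXISTS {quoted}")
conn.commit()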
- result.index = frame.index + expected = frame + tm.assert_frame_equal(result, frame) - expected = frame - tm.assert_frame_equal(result, frame) + frame["txt"] = ["a"] * len(frame) + frame2 = frame.copy() + new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10 + frame2["Idx"] = new_idx.copy() + assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 30 + result = sql.read_sql("select * from test_table2", sqlite_buildin, index_col="Idx") + expected = frame.copy() + expected.index = new_idx + expected.index.name = "Idx" + tm.assert_frame_equal(expected, result) - frame["txt"] = ["a"] * len(frame) - frame2 = frame.copy() - new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10 - frame2["Idx"] = new_idx.copy() - assert ( - sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) - == 30 - ) - result = sql.read_sql( - "select * from test_table2", sqlite_buildin, index_col="Idx" - ) - expected = frame.copy() - expected.index = new_idx - expected.index.name = "Idx" - tm.assert_frame_equal(expected, result) - - def test_write_row_by_row(self, sqlite_buildin): - frame = tm.makeTimeDataFrame() - frame.iloc[0, 0] = np.nan - create_sql = sql.get_schema(frame, "test") - cur = sqlite_buildin.cursor() - cur.execute(create_sql) - ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" - for _, row in frame.iterrows(): - fmt_sql = format_query(ins, *row) - tquery(fmt_sql, con=sqlite_buildin) +def test_xsqlite_write_row_by_row(sqlite_buildin): + frame = tm.makeTimeDataFrame() + frame.iloc[0, 0] = np.nan + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) - sqlite_buildin.commit() + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + for _, row in frame.iterrows(): + fmt_sql = format_query(ins, *row) + tquery(fmt_sql, con=sqlite_buildin) - result = sql.read_sql("select * from test", con=sqlite_buildin) - result.index = frame.index - tm.assert_frame_equal(result, frame, rtol=1e-3) + sqlite_buildin.commit() - def test_execute(self, sqlite_buildin): - frame = tm.makeTimeDataFrame() - create_sql = sql.get_schema(frame, "test") - cur = sqlite_buildin.cursor() - cur.execute(create_sql) - ins = "INSERT INTO test VALUES (?, ?, ?, ?)" - - row = frame.iloc[0] - with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: - pandas_sql.execute(ins, tuple(row)) - sqlite_buildin.commit() - - result = sql.read_sql("select * from test", sqlite_buildin) - result.index = frame.index[:1] - tm.assert_frame_equal(result, frame[:1]) - - def test_schema(self, sqlite_buildin): - frame = tm.makeTimeDataFrame() - create_sql = sql.get_schema(frame, "test") - lines = create_sql.splitlines() - for line in lines: - tokens = line.split(" ") - if len(tokens) == 2 and tokens[0] == "A": - assert tokens[1] == "DATETIME" - - create_sql = sql.get_schema(frame, "test", keys=["A", "B"]) - lines = create_sql.splitlines() - assert 'PRIMARY KEY ("A", "B")' in create_sql - cur = sqlite_buildin.cursor() - cur.execute(create_sql) + result = sql.read_sql("select * from test", con=sqlite_buildin) + result.index = frame.index + tm.assert_frame_equal(result, frame, rtol=1e-3) - def test_execute_fail(self, sqlite_buildin): - create_sql = """ - CREATE TABLE test - ( - a TEXT, - b TEXT, - c REAL, - PRIMARY KEY (a, b) - ); - """ - cur = sqlite_buildin.cursor() + +def test_xsqlite_execute(sqlite_buildin): + frame = tm.makeTimeDataFrame() + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + ins = "INSERT INTO test 
VALUES (?, ?, ?, ?)" + + row = frame.iloc[0] + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute(ins, tuple(row)) + sqlite_buildin.commit() + + result = sql.read_sql("select * from test", sqlite_buildin) + result.index = frame.index[:1] + tm.assert_frame_equal(result, frame[:1]) + + +def test_xsqlite_schema(sqlite_buildin): + frame = tm.makeTimeDataFrame() + create_sql = sql.get_schema(frame, "test") + lines = create_sql.splitlines() + for line in lines: + tokens = line.split(" ") + if len(tokens) == 2 and tokens[0] == "A": + assert tokens[1] == "DATETIME" + + create_sql = sql.get_schema(frame, "test", keys=["A", "B"]) + lines = create_sql.splitlines() + assert 'PRIMARY KEY ("A", "B")' in create_sql + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + +def test_xsqlite_execute_fail(sqlite_buildin): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') + pandas_sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)') + + with pytest.raises(sql.DatabaseError, match="Execution failed on sql"): + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 7)') + + +def test_xsqlite_execute_closed_connection(): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + with contextlib.closing(sqlite3.connect(":memory:")) as conn: + cur = conn.cursor() cur.execute(create_sql) - with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + with sql.pandasSQL_builder(conn) as pandas_sql: pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') - pandas_sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)') - with pytest.raises(sql.DatabaseError, match="Execution failed on sql"): - pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 7)') + msg = "Cannot operate on a closed database." + with pytest.raises(sqlite3.ProgrammingError, match=msg): + tquery("select * from test", con=conn) - def test_execute_closed_connection(self): - create_sql = """ - CREATE TABLE test - ( - a TEXT, - b TEXT, - c REAL, - PRIMARY KEY (a, b) - ); - """ - with contextlib.closing(sqlite3.connect(":memory:")) as conn: - cur = conn.cursor() - cur.execute(create_sql) - - with sql.pandasSQL_builder(conn) as pandas_sql: - pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') - - msg = "Cannot operate on a closed database." 
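The ``pandasSQL_builder`` pattern these execute tests share, sketched end to end against an in-memory SQLite connection (the DDL mirrors the tests and is otherwise illustrative):

import sqlite3

import pandas.io.sql as sql

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (a TEXT, b TEXT, c REAL, PRIMARY KEY (a, b))")

with sql.pandasSQL_builder(conn) as pandas_sql:
    pandas_sql.execute("INSERT INTO t VALUES ('foo', 'bar', 1.234)")
    # Re-inserting the same primary key violates the constraint; pandas
    # re-raises it as sql.DatabaseError ("Execution failed on sql ...").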
- with pytest.raises(sqlite3.ProgrammingError, match=msg): - tquery("select * from test", con=conn) - - def test_keyword_as_column_names(self, sqlite_buildin): - df = DataFrame({"From": np.ones(5)}) - assert sql.to_sql(df, con=sqlite_buildin, name="testkeywords", index=False) == 5 - - def test_onecolumn_of_integer(self, sqlite_buildin): - # GH 3628 - # a column_of_integers dataframe should transfer well to sql - - mono_df = DataFrame([1, 2], columns=["c0"]) - assert sql.to_sql(mono_df, con=sqlite_buildin, name="mono_df", index=False) == 2 - # computing the sum via sql - con_x = sqlite_buildin - the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df")) - # it should not fail, and gives 3 ( Issue #3628 ) - assert the_sum == 3 - - result = sql.read_sql("select * from mono_df", con_x) - tm.assert_frame_equal(result, mono_df) - - def test_if_exists(self, sqlite_buildin): - df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) - df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) - table_name = "table_if_exists" - sql_select = f"SELECT * FROM {table_name}" - - msg = "'notvalidvalue' is not valid for if_exists" - with pytest.raises(ValueError, match=msg): - sql.to_sql( - frame=df_if_exists_1, - con=sqlite_buildin, - name=table_name, - if_exists="notvalidvalue", - ) - self.drop_table(table_name, sqlite_buildin) - # test if_exists='fail' +def test_xsqlite_keyword_as_column_names(sqlite_buildin): + df = DataFrame({"From": np.ones(5)}) + assert sql.to_sql(df, con=sqlite_buildin, name="testkeywords", index=False) == 5 + + +def test_xsqlite_onecolumn_of_integer(sqlite_buildin): + # GH 3628 + # a column_of_integers dataframe should transfer well to sql + + mono_df = DataFrame([1, 2], columns=["c0"]) + assert sql.to_sql(mono_df, con=sqlite_buildin, name="mono_df", index=False) == 2 + # computing the sum via sql + con_x = sqlite_buildin + the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df")) + # it should not fail, and gives 3 ( Issue #3628 ) + assert the_sum == 3 + + result = sql.read_sql("select * from mono_df", con_x) + tm.assert_frame_equal(result, mono_df) + + +def test_xsqlite_if_exists(sqlite_buildin): + df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) + df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) + table_name = "table_if_exists" + sql_select = f"SELECT * FROM {table_name}" + + msg = "'notvalidvalue' is not valid for if_exists" + with pytest.raises(ValueError, match=msg): sql.to_sql( - frame=df_if_exists_1, con=sqlite_buildin, name=table_name, if_exists="fail" + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="notvalidvalue", ) - msg = "Table 'table_if_exists' already exists" - with pytest.raises(ValueError, match=msg): - sql.to_sql( - frame=df_if_exists_1, - con=sqlite_buildin, - name=table_name, - if_exists="fail", - ) - # test if_exists='replace' + drop_table(table_name, sqlite_buildin) + + # test if_exists='fail' + sql.to_sql( + frame=df_if_exists_1, con=sqlite_buildin, name=table_name, if_exists="fail" + ) + msg = "Table 'table_if_exists' already exists" + with pytest.raises(ValueError, match=msg): sql.to_sql( frame=df_if_exists_1, con=sqlite_buildin, name=table_name, + if_exists="fail", + ) + # test if_exists='replace' + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="replace", + index=False, + ) + assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + 
frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, if_exists="replace", index=False, ) - assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] - assert ( - sql.to_sql( - frame=df_if_exists_2, - con=sqlite_buildin, - name=table_name, - if_exists="replace", - index=False, - ) - == 3 - ) - assert tquery(sql_select, con=sqlite_buildin) == [(3, "C"), (4, "D"), (5, "E")] - self.drop_table(table_name, sqlite_buildin) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(3, "C"), (4, "D"), (5, "E")] + drop_table(table_name, sqlite_buildin) - # test if_exists='append' - assert ( - sql.to_sql( - frame=df_if_exists_1, - con=sqlite_buildin, - name=table_name, - if_exists="fail", - index=False, - ) - == 2 + # test if_exists='append' + assert ( + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="fail", + index=False, ) - assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] - assert ( - sql.to_sql( - frame=df_if_exists_2, - con=sqlite_buildin, - name=table_name, - if_exists="append", - index=False, - ) - == 3 + == 2 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, + if_exists="append", + index=False, ) - assert tquery(sql_select, con=sqlite_buildin) == [ - (1, "A"), - (2, "B"), - (3, "C"), - (4, "D"), - (5, "E"), - ] - self.drop_table(table_name, sqlite_buildin) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [ + (1, "A"), + (2, "B"), + (3, "C"), + (4, "D"), + (5, "E"), + ] + drop_table(table_name, sqlite_buildin) From 835233180aff90d8ee4aa65a95f6daded84a6cfd Mon Sep 17 00:00:00 2001 From: Abdullah Ihsan Secer Date: Tue, 19 Sep 2023 11:40:15 +0100 Subject: [PATCH 86/93] TST: Test GroupBy.__getitem__ with a column from grouper (#55193) --- pandas/tests/groupby/test_grouping.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index d05b60fd56b5f..9e899bf453548 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -131,6 +131,20 @@ def test_getitem_single_column(self): tm.assert_series_equal(result, expected) + @pytest.mark.parametrize( + "func", [lambda x: x.sum(), lambda x: x.agg(lambda y: y.sum())] + ) + def test_getitem_from_grouper(self, func): + # GH 50383 + df = DataFrame({"a": [1, 1, 2], "b": 3, "c": 4, "d": 5}) + gb = df.groupby(["a", "b"])[["a", "c"]] + + idx = MultiIndex.from_tuples([(1, 3), (2, 3)], names=["a", "b"]) + expected = DataFrame({"a": [2, 2], "c": [8, 4]}, index=idx) + result = func(gb) + + tm.assert_frame_equal(result, expected) + def test_indices_grouped_by_tuple_with_lambda(self): # GH 36158 df = DataFrame( From 6a65cfdccef0974bf5291c0a6c5e237968d1f034 Mon Sep 17 00:00:00 2001 From: Thomas Li <47963215+lithomas1@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:33:34 -0400 Subject: [PATCH 87/93] DEPS: Bump to 1.26 proper for Python 3.12 (#55175) * DEPS: Bump to 1.26 proper for Python 3.12 * Update unit-tests.yml * Update unit-tests.yml * revert hack --- pyproject.toml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7807a6cc6368d..4e1c77413efda 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,8 +10,7 @@ requires = [ # we don't want to force users to compile with 1.25 though # (Ideally, in the future, though, oldest-supported-numpy can be dropped when our 
min numpy is 1.25.x) "oldest-supported-numpy>=2022.8.16; python_version<'3.12'", - # TODO: This needs to be updated when the official numpy 1.26 comes out - "numpy>=1.26.0b1; python_version>='3.12'", + "numpy>=1.26.0; python_version>='3.12'", "versioneer[toml]" ] @@ -32,8 +31,7 @@ requires-python = '>=3.9' dependencies = [ "numpy>=1.22.4; python_version<'3.11'", "numpy>=1.23.2; python_version=='3.11'", - # TODO: This needs to be updated when the official numpy 1.26 comes out - "numpy>=1.26.0b1; python_version>='3.12'", + "numpy>=1.26.0; python_version>='3.12'", "python-dateutil>=2.8.2", "pytz>=2020.1", "tzdata>=2022.1" From 20949e287d2688ef3e0e76bb39143db42fcbb813 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Tue, 19 Sep 2023 11:46:13 -0400 Subject: [PATCH 88/93] CLN: do not import Axes/Artist/Figure from matplotlib.pyplot (#55192) CLN: do not import Axes/Artist from matplotlib.pyplot --- pandas/_testing/asserters.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 0591394f5d9ed..d4e7e196dc2d4 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -416,7 +416,8 @@ def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None: def assert_is_valid_plot_return_object(objs) -> None: - import matplotlib.pyplot as plt + from matplotlib.artist import Artist + from matplotlib.axes import Axes if isinstance(objs, (Series, np.ndarray)): for el in objs.ravel(): @@ -424,14 +425,14 @@ def assert_is_valid_plot_return_object(objs) -> None: "one of 'objs' is not a matplotlib Axes instance, " f"type encountered {repr(type(el).__name__)}" ) - assert isinstance(el, (plt.Axes, dict)), msg + assert isinstance(el, (Axes, dict)), msg else: msg = ( "objs is neither an ndarray of Artist instances nor a single " "ArtistArtist instance, tuple, or dict, 'objs' is a " f"{repr(type(objs).__name__)}" ) - assert isinstance(objs, (plt.Artist, tuple, dict)), msg + assert isinstance(objs, (Artist, tuple, dict)), msg def assert_is_sorted(seq) -> None: From 696d21b5fea1c768ee833c3f67db321f9d329921 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:47:06 -0400 Subject: [PATCH 89/93] TYP: Allow None in Period.strftime (#55190) * TYP: Allow None in strftime * Change pyi * Add description to docstring --- pandas/_libs/tslibs/period.pyi | 2 +- pandas/_libs/tslibs/period.pyx | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pandas/_libs/tslibs/period.pyi b/pandas/_libs/tslibs/period.pyi index c85865fea8fd0..a4aecd2ce0a09 100644 --- a/pandas/_libs/tslibs/period.pyi +++ b/pandas/_libs/tslibs/period.pyi @@ -90,7 +90,7 @@ class Period(PeriodMixin): def _from_ordinal(cls, ordinal: int, freq) -> Period: ... @classmethod def now(cls, freq: Frequency = ...) -> Period: ... - def strftime(self, fmt: str) -> str: ... + def strftime(self, fmt: str | None) -> str: ... 
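What the widened ``fmt: str | None`` annotation permits, as a quick sketch:

import pandas as pd

p = pd.Period("2023-09", freq="M")
print(p.strftime("%b-%Y"))  # Sep-2023, explicit directives as before
print(p.strftime(None))     # 2023-09, format derived from the monthly frequency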
def to_timestamp( self, freq: str | BaseOffset | None = ..., diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c37e9cd7ef1f3..21937ab2519e4 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -2511,11 +2511,12 @@ cdef class _Period(PeriodMixin): object_state = None, self.freq, self.ordinal return (Period, object_state) - def strftime(self, fmt: str) -> str: + def strftime(self, fmt: str | None) -> str: r""" Returns a formatted string representation of the :class:`Period`. - ``fmt`` must be a string containing one or several directives. + ``fmt`` must be ``None`` or a string containing one or several directives. + When ``None``, the format will be determined from the frequency of the Period. The method recognizes the same directives as the :func:`time.strftime` function of the standard Python distribution, as well as the specific additional directives ``%f``, ``%F``, ``%q``, ``%l``, ``%u``, ``%n``. From ef5c01673510341d9045d492027c46ccaa246f13 Mon Sep 17 00:00:00 2001 From: kgmuzungu Date: Tue, 19 Sep 2023 17:50:04 +0200 Subject: [PATCH 90/93] DOC: add alternative to docstring of deprecated Series.bool() method (#55168) * Fix for series boolean documentation * one blank line removed. docstring error * Update pandas/core/generic.py fixing text formating Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> * Update pandas/core/generic.py text formatting fixed Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --------- Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> --- pandas/core/generic.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 271ad40a98272..d6508cc43a7c8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1526,7 +1526,8 @@ def bool(self) -> bool_t: .. deprecated:: 2.1.0 - bool is deprecated and will be removed in future version of pandas + bool is deprecated and will be removed in future version of pandas. + For ``Series`` use ``pandas.Series.item``. This must be a boolean scalar value, either True or False. 
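A sketch of the error path this docstring describes, using the recommended ``Series.item`` replacement (behavior as documented for pandas 2.x):

import pandas as pd

s = pd.Series([True, False])
try:
    s.item()  # like the deprecated .bool(), this requires exactly one element
except ValueError as err:
    print(err)  # only a length-1 Series can be reduced to a scalar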
It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that @@ -1556,6 +1557,14 @@ def bool(self) -> bool_t: True >>> pd.DataFrame({'col': [False]}).bool() # doctest: +SKIP False + + This is an alternative method and will only work + for single element objects with a boolean value: + + >>> pd.Series([True]).item() # doctest: +SKIP + True + >>> pd.Series([False]).item() # doctest: +SKIP + False """ warnings.warn( From cb5b2e6f5280df2d26b0a9550828c4bdf1098053 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 19 Sep 2023 10:21:30 -0700 Subject: [PATCH 91/93] BUG: RecursionError in loc.setitem (#55201) --- doc/source/whatsnew/v2.2.0.rst | 1 + pandas/core/dtypes/cast.py | 18 ++++++++++++++++++ pandas/core/internals/blocks.py | 6 ++++++ pandas/tests/indexing/test_loc.py | 8 ++++++++ 4 files changed, 33 insertions(+) diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 0fc4afc95a2ce..24bed22b3a3fe 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -266,6 +266,7 @@ Interval Indexing ^^^^^^^^ - Bug in :meth:`Index.difference` not returning a unique set of values when ``other`` is empty or ``other`` is considered non-comparable (:issue:`55113`) +- Bug in setting :class:`Categorical` values into a :class:`DataFrame` with numpy dtypes raising ``RecursionError`` (:issue:`52927`) - Missing diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 1d5db123068e2..74e785be06356 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -68,6 +68,7 @@ PeriodDtype, ) from pandas.core.dtypes.generic import ( + ABCExtensionArray, ABCIndex, ABCSeries, ) @@ -1772,6 +1773,23 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: return casted raise LossySetitemError + elif isinstance(element, ABCExtensionArray) and isinstance( + element.dtype, CategoricalDtype + ): + # GH#52927 setting Categorical value into non-EA frame + # TODO: general-case for EAs? + try: + casted = element.astype(dtype) + except (ValueError, TypeError): + raise LossySetitemError + # Check for cases of either + # a) lossy overflow/rounding or + # b) semantic changes like dt64->int64 + comp = casted == element + if not comp.all(): + raise LossySetitemError + return casted + # Anything other than integer we cannot hold raise LossySetitemError if ( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 66b01dfb59f7f..30f6507d02484 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -460,6 +460,12 @@ def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block: and will receive the same block """ new_dtype = find_result_type(self.values.dtype, other) + if new_dtype == self.dtype: + # GH#52927 avoid RecursionError + raise AssertionError( + "Something has gone wrong, please report a bug at " + "https://github.com/pandas-dev/pandas/issues" + ) # In a future version of pandas, the default will be that # setting `nan` into an integer series won't raise. 
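The behavior patch 91 restores, sketched below; it mirrors the regression test added in the next hunk:

from pandas import Categorical, DataFrame

df = DataFrame({"a": [1, 1, 1, 1, 1], "b": ["a"] * 5})
# Previously this recursed in coerce_to_target_dtype; the Categorical values
# are now cast to the numpy column dtype when that cast is lossless.
df.loc[1:2, "a"] = Categorical([2, 2], categories=[1, 2])
print(df["a"].tolist())  # [1, 2, 2, 1, 1]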
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index d0b6adfda0241..a2693c85e507f 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1657,6 +1657,14 @@ def test_loc_setitem_range_key(self, frame_or_series):
         expected = frame_or_series([0, 1, 10, 9, 11], index=obj.index)
         tm.assert_equal(obj, expected)
 
+    def test_loc_setitem_numpy_frame_categorical_value(self):
+        # GH#52927
+        df = DataFrame({"a": [1, 1, 1, 1, 1], "b": ["a", "a", "a", "a", "a"]})
+        df.loc[1:2, "a"] = Categorical([2, 2], categories=[1, 2])
+
+        expected = DataFrame({"a": [1, 2, 2, 1, 1], "b": ["a", "a", "a", "a", "a"]})
+        tm.assert_frame_equal(df, expected)
+
 
 class TestLocWithEllipsis:
     @pytest.fixture(params=[tm.loc, tm.iloc])

From f1bf7d4b30a162d7b52417c918e3cf1cc6c14863 Mon Sep 17 00:00:00 2001
From: AG <98327736+ggold7046@users.noreply.github.com>
Date: Wed, 20 Sep 2023 00:18:49 +0530
Subject: [PATCH 92/93] Modified doc/make.py to run sphinx-build -b linkcheck (#54265)

* Modified doc/make.py to run sphinx-build -b linkcheck

* Update make.py

* Update make.py

* Update make.py

* Update make.py

* Update make.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update doc/make.py

Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>

* Update make.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update make.py

* Update doc/make.py

Co-authored-by: Philip Meier

* Update doc/make.py

Co-authored-by: Philip Meier

* Update make.py

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Co-authored-by: Philip Meier
---
 doc/make.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/doc/make.py b/doc/make.py
index 9db4ea406bc1f..23b18d69acc04 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -123,14 +123,14 @@ def _sphinx_build(self, kind: str):
 
         Parameters
         ----------
-        kind : {'html', 'latex'}
+        kind : {'html', 'latex', 'linkcheck'}
 
         Examples
         --------
         >>> DocBuilder(num_jobs=4)._sphinx_build('html')
         """
-        if kind not in ("html", "latex"):
-            raise ValueError(f"kind must be html or latex, not {kind}")
+        if kind not in ("html", "latex", "linkcheck"):
+            raise ValueError(f"kind must be html, latex or linkcheck, not {kind}")
 
         cmd = ["sphinx-build", "-b", kind]
         if self.num_jobs:
@@ -288,6 +288,12 @@ def zip_html(self):
         os.chdir(dirname)
         self._run_os("zip", zip_fname, "-r", "-q", *fnames)
 
+    def linkcheck(self):
+        """
+        Check for broken links in the documentation.
+        """
+        return self._sphinx_build("linkcheck")
+
 
 def main():
     cmds = [method for method in dir(DocBuilder) if not method.startswith("_")]

From 6d58277feef818bb488dbd437f71aaf0bfb5e412 Mon Sep 17 00:00:00 2001
From: Sergey Zakharov
Date: Wed, 20 Sep 2023 00:04:56 +0400
Subject: [PATCH 93/93] Fix NameError in 'Evaluation order matters' (#55198)

* Fix NameError in 'Evaluation order matters'

This code in 'Indexing and selecting data' of the docs wouldn't execute

* Update indexing.rst

Another 'option_context' without 'pd.'
---
 doc/source/user_guide/indexing.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 52bc43f52b1d3..7541cf3c8af3c 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -1837,7 +1837,7 @@ This however is operating on a copy and will not work.
    :okwarning:
    :okexcept:
 
-   with option_context('mode.chained_assignment','warn'):
+   with pd.option_context('mode.chained_assignment','warn'):
        dfb[dfb['a'].str.startswith('o')]['c'] = 42
 
 A chained assignment can also crop up in setting in a mixed dtype frame.
@@ -1879,7 +1879,7 @@ Last, the subsequent example will **not** work at all, and so should be avoided:
    :okwarning:
    :okexcept:
 
-   with option_context('mode.chained_assignment','raise'):
+   with pd.option_context('mode.chained_assignment','raise'):
        dfd.loc[0]['a'] = 1111
 
 .. warning::
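
The loc.setitem fix in patch 91 can be exercised end to end. Below is a minimal
standalone sketch mirroring the new test_loc_setitem_numpy_frame_categorical_value;
it assumes a pandas build that includes the patch (without it, the assignment
recursed through coerce_to_target_dtype until RecursionError, per GH#52927):

    # Minimal reproduction of GH#52927. Assumes a pandas build containing
    # patch 91; on earlier 2.1.x builds this assignment raised RecursionError.
    import pandas as pd

    df = pd.DataFrame({"a": [1, 1, 1, 1, 1], "b": ["a", "a", "a", "a", "a"]})
    df.loc[1:2, "a"] = pd.Categorical([2, 2], categories=[1, 2])

    # The Categorical values are cast losslessly into the existing int64
    # column, so the column keeps its numpy dtype instead of upcasting.
    expected = pd.DataFrame({"a": [1, 2, 2, 1, 1], "b": ["a", "a", "a", "a", "a"]})
    assert df.equals(expected)
    assert df["a"].dtype == "int64"

Casting the Categorical into the existing numpy dtype, rather than upcasting the
block, is exactly what the new ABCExtensionArray branch in np_can_hold_element
provides.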
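For patch 92, no argument-parsing changes are needed: main() in doc/make.py builds
its command list from the public methods of DocBuilder, so the new linkcheck()
method is picked up automatically and can be run as `python make.py linkcheck`.
A simplified, hypothetical sketch of that dispatch pattern (names mirror the
patch, but the class body here is a stand-in, not the real builder):

    # Hypothetical sketch of the CLI dispatch in doc/make.py: every public
    # DocBuilder method becomes a subcommand, so adding linkcheck() suffices.
    class DocBuilder:
        def _sphinx_build(self, kind: str) -> int:
            print(f"sphinx-build -b {kind} ...")  # stand-in for the real call
            return 0

        def html(self) -> int:
            return self._sphinx_build("html")

        def linkcheck(self) -> int:
            # Check for broken links in the documentation.
            return self._sphinx_build("linkcheck")

    cmds = [m for m in dir(DocBuilder) if not m.startswith("_")]
    assert "linkcheck" in cmds  # appears automatically among the CLI choices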
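Finally, the one-word change in patch 93 matters because the user guide's code
blocks execute with only `import pandas as pd` in scope, so the bare
option_context name raised NameError when the docs were built. A self-contained
version of the corrected snippet is sketched below; it assumes pandas 2.x in the
default (non-copy-on-write) mode, where the chained assignment raises
SettingWithCopyError, and the small frame is a hypothetical stand-in for the dfd
built earlier in the guide:

    # Standalone sketch of the corrected indexing.rst example. Assumes pandas
    # 2.x without copy-on-write; `dfd` is a stand-in frame, not the one from
    # the user guide.
    import pandas as pd

    dfd = pd.DataFrame({"a": ["one", "two", "three"], "c": [0, 1, 2]})

    with pd.option_context("mode.chained_assignment", "raise"):
        try:
            dfd.loc[0]["a"] = 1111  # chained assignment on an intermediate copy
        except pd.errors.SettingWithCopyError:
            print("raised, as the user guide warns")

    # The supported spelling performs the same update in a single .loc call:
    dfd.loc[0, "a"] = 1111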