DEPR: Series/DataFrame/HDFStore.iteritems() (#45321)
mroeschke authored Jan 16, 2022
1 parent e84b9ee commit e255e56
Showing 12 changed files with 57 additions and 16 deletions.
2 changes: 2 additions & 0 deletions doc/source/user_guide/scale.rst
@@ -275,6 +275,7 @@ column names and dtypes. That's because Dask hasn't actually read the data yet.
 Rather than executing immediately, doing operations build up a **task graph**.
 
 .. ipython:: python
+   :okwarning:
 
    ddf
    ddf["name"]
@@ -333,6 +334,7 @@ known automatically. In this case, since we created the parquet files manually,
 we need to supply the divisions manually.
 
 .. ipython:: python
+   :okwarning:
 
    N = 12
    starts = [f"20{i:>02d}-01-01" for i in range(N)]
1 change: 1 addition & 0 deletions doc/source/whatsnew/v1.5.0.rst
@@ -95,6 +95,7 @@ Other API changes
 Deprecations
 ~~~~~~~~~~~~
 - Deprecated the keyword ``line_terminator`` in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv`, use ``lineterminator`` instead; this is for consistency with :func:`read_csv` and the standard library 'csv' module (:issue:`9568`)
+- Deprecated :meth:`DataFrame.iteritems`, :meth:`Series.iteritems`, :meth:`HDFStore.iteritems` in favor of :meth:`DataFrame.items`, :meth:`Series.items`, :meth:`HDFStore.items` (:issue:`45321`)
 -
 
 .. ---------------------------------------------------------------------------
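For downstream code, the fix is a one-for-one rename. A minimal sketch of the migration, assuming a pandas build that includes this deprecation (the frame and values are illustrative, not from this commit):

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

    # Deprecated spelling, now emits a FutureWarning when iterated:
    # for name, col in df.iteritems(): ...

    # Preferred spelling, yields the same (label, Series) pairs:
    for name, col in df.items():
        print(name, col.sum())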
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/accessor.py
@@ -339,7 +339,7 @@ def to_coo(self):
             dtype = dtype.subtype
 
         cols, rows, data = [], [], []
-        for col, (_, ser) in enumerate(self._parent.iteritems()):
+        for col, (_, ser) in enumerate(self._parent.items()):
             sp_arr = ser.array
             if sp_arr.fill_value != 0:
                 raise ValueError("fill value must be 0 when converting to COO matrix")
6 changes: 6 additions & 0 deletions pandas/core/frame.py
@@ -1276,6 +1276,12 @@ def items(self) -> Iterable[tuple[Hashable, Series]]:
 
     @Appender(_shared_docs["items"])
     def iteritems(self) -> Iterable[tuple[Hashable, Series]]:
+        warnings.warn(
+            "iteritems is deprecated and will be removed in a future version. "
+            "Use .items instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
         yield from self.items()
 
     def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
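Because DataFrame.iteritems is a generator (yield from self.items()), the warning above fires on first iteration rather than at call time. A hedged sketch of how callers would observe it, assuming this change is installed:

    import warnings

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2]})

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        list(df.iteritems())  # iterating triggers the deprecation warning

    assert any(issubclass(w.category, FutureWarning) for w in caught)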
17 changes: 7 additions & 10 deletions pandas/core/generic.py
@@ -1989,10 +1989,6 @@ def items(self):
         for h in self._info_axis:
             yield h, self[h]
 
-    @doc(items)
-    def iteritems(self):
-        return self.items()
-
     def __len__(self) -> int:
         """Returns length of info axis"""
         return len(self._info_axis)
@@ -5855,19 +5851,20 @@ def astype(
                 new_type = dtype[self.name]
                 return self.astype(new_type, copy, errors)
 
-            for col_name in dtype.keys():
+            # GH#44417 cast to Series so we can use .iat below, which will be
+            # robust in case we
+            from pandas import Series
+
+            dtype_ser = Series(dtype, dtype=object)
+
+            for col_name in dtype_ser.index:
                 if col_name not in self:
                     raise KeyError(
                         "Only a column name can be used for the "
                         "key in a dtype mappings argument. "
                         f"'{col_name}' not found in columns."
                     )
 
-            # GH#44417 cast to Series so we can use .iat below, which will be
-            # robust in case we
-            from pandas import Series
-
-            dtype_ser = Series(dtype, dtype=object)
             dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False)
 
             results = []
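The second hunk only reorders the validation inside astype: the dtype mapping is cast to a Series up front and its index drives the column check, so the user-facing dict API is unchanged. A small illustration of that API (data is illustrative):

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3.5, 4.5]})

    # Per-column dtype mapping; keys must be existing column names.
    converted = df.astype({"a": "float64"})
    assert converted["a"].dtype == "float64"

    # A key that is not a column still raises KeyError, as before:
    # df.astype({"c": "int64"})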
6 changes: 6 additions & 0 deletions pandas/core/series.py
@@ -1705,6 +1705,12 @@ def items(self) -> Iterable[tuple[Hashable, Any]]:
 
     @Appender(items.__doc__)
     def iteritems(self) -> Iterable[tuple[Hashable, Any]]:
+        warnings.warn(
+            "iteritems is deprecated and will be removed in a future version. "
+            "Use .items instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
         return self.items()
 
     # ----------------------------------------------------------------------
12 changes: 11 additions & 1 deletion pandas/io/pytables.py
@@ -692,7 +692,17 @@ def items(self):
         for g in self.groups():
             yield g._v_pathname, g
 
-    iteritems = items
+    def iteritems(self):
+        """
+        iterate on key->group
+        """
+        warnings.warn(
+            "iteritems is deprecated and will be removed in a future version. "
+            "Use .items instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        yield from self.items()
 
     def open(self, mode: str = "a", **kwargs):
         """
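HDFStore.items() yields the same (pathname, group) pairs that iteritems() did. A minimal sketch of the replacement call, assuming PyTables is installed (the file name is illustrative):

    import pandas as pd

    with pd.HDFStore("example_store.h5", mode="w") as store:
        store.put("df", pd.DataFrame({"a": [1, 2]}))
        # Previously: for path, group in store.iteritems(): ...
        for path, group in store.items():
            print(path)  # e.g. "/df"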
5 changes: 5 additions & 0 deletions pandas/tests/frame/test_api.py
@@ -366,3 +366,8 @@ def test_inspect_getmembers(self):
         df = DataFrame()
         with tm.assert_produces_warning(None):
             inspect.getmembers(df)
+
+    def test_dataframe_iteritems_deprecated(self):
+        df = DataFrame([1])
+        with tm.assert_produces_warning(FutureWarning):
+            next(df.iteritems())
2 changes: 1 addition & 1 deletion pandas/tests/groupby/test_apply.py
@@ -843,7 +843,7 @@ def test_apply_series_return_dataframe_groups():
     )
 
     def most_common_values(df):
-        return Series({c: s.value_counts().index[0] for c, s in df.iteritems()})
+        return Series({c: s.value_counts().index[0] for c, s in df.items()})
 
     result = tdf.groupby("day").apply(most_common_values)["userId"]
     expected = Series(
9 changes: 9 additions & 0 deletions pandas/tests/io/pytables/test_store.py
@@ -1011,3 +1011,12 @@ def test_to_hdf_with_object_column_names(setup_path):
         df.to_hdf(path, "df", format="table", data_columns=True)
         result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
         assert len(result)
+
+
+def test_hdfstore_iteritems_deprecated(setup_path):
+    with ensure_clean_path(setup_path) as path:
+        df = DataFrame({"a": [1]})
+        with HDFStore(path, mode="w") as hdf:
+            hdf.put("table", df)
+            with tm.assert_produces_warning(FutureWarning):
+                next(hdf.iteritems())
5 changes: 5 additions & 0 deletions pandas/tests/series/test_api.py
@@ -203,3 +203,8 @@ def test_series_datetimelike_attribute_access_invalid(self):
         msg = "'Series' object has no attribute 'weekday'"
         with pytest.raises(AttributeError, match=msg):
             ser.weekday
+
+    def test_series_iteritems_deprecated(self):
+        ser = Series([1])
+        with tm.assert_produces_warning(FutureWarning):
+            next(ser.iteritems())
6 changes: 3 additions & 3 deletions pandas/tests/series/test_iteration.py
@@ -11,15 +11,15 @@ def test_iter_strings(self, string_series):
             assert val == string_series[i]
 
     def test_iteritems_datetimes(self, datetime_series):
-        for idx, val in datetime_series.iteritems():
+        for idx, val in datetime_series.items():
             assert val == datetime_series[idx]
 
     def test_iteritems_strings(self, string_series):
-        for idx, val in string_series.iteritems():
+        for idx, val in string_series.items():
             assert val == string_series[idx]
 
         # assert is lazy (generators don't define reverse, lists do)
-        assert not hasattr(string_series.iteritems(), "reverse")
+        assert not hasattr(string_series.items(), "reverse")
 
     def test_items_datetimes(self, datetime_series):
         for idx, val in datetime_series.items():
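The reworded assertion still checks that Series.items() returns a lazy iterator of (index, value) pairs rather than a list. Roughly, with toy data:

    import pandas as pd

    ser = pd.Series([10, 20], index=["x", "y"])

    it = ser.items()                   # lazy iterator, not a list
    assert not hasattr(it, "reverse")  # lists define .reverse, iterators do not
    assert next(it) == ("x", 10)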
