CLN: Remove unused functions (pandas-dev#57844)
tqa236 authored Mar 14, 2024
1 parent 34ec78b commit d831326
Showing 4 changed files with 0 additions and 129 deletions.
pandas/compat/numpy/function.py (0 additions, 41 deletions)
@@ -258,10 +258,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
     MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
 )
 
-RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
-validate_reshape = CompatValidator(
-    RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
-)
 
 REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
 validate_repeat = CompatValidator(
@@ -273,12 +269,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
     ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
 )
 
-SORT_DEFAULTS: dict[str, int | str | None] = {}
-SORT_DEFAULTS["axis"] = -1
-SORT_DEFAULTS["kind"] = "quicksort"
-SORT_DEFAULTS["order"] = None
-validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
-
 STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
 STAT_FUNC_DEFAULTS["dtype"] = None
 STAT_FUNC_DEFAULTS["out"] = None
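
The validators removed above follow the CompatValidator pattern used throughout this module: numpy-compatibility keywords such as "order", "axis", and "kind" are accepted so pandas methods can mirror numpy signatures, but only their numpy default values pass validation. A minimal sketch of what the removed validate_sort enforced (hypothetical calls, not part of the diff):

    validate_sort((), {"kind": "quicksort"})  # passes: value matches the numpy default
    validate_sort((), {"kind": "mergesort"})  # raises: non-default values are unsupported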
@@ -324,20 +314,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
 validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
 
 
-def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
-    """
-    If this function is called via the 'numpy' library, the third parameter in
-    its signature is 'axis', which takes either an ndarray or 'None', so check
-    if the 'convert' parameter is either an instance of ndarray or is None
-    """
-    if isinstance(convert, ndarray) or convert is None:
-        args = (convert,) + args
-        convert = True
-
-    validate_take(args, kwargs, max_fname_arg_count=3, method="both")
-    return convert
-
-
 TRANSPOSE_DEFAULTS = {"axes": None}
 validate_transpose = CompatValidator(
     TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
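
As its docstring explains, the removed validate_take_with_convert handled numpy-style calls in which the third positional parameter is 'axis': an ndarray or None arriving in the 'convert' slot was folded back into args before validation. A hypothetical sketch of the two call shapes it accepted:

    # pandas-style call: 'convert' is a real boolean and is returned unchanged
    validate_take_with_convert(True, args=(), kwargs={})   # -> True

    # numpy-style call: None in the 'convert' slot is pushed into 'args'
    # for validation against TAKE_DEFAULTS, and True is returned instead
    validate_take_with_convert(None, args=(), kwargs={})   # -> True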
@@ -362,23 +338,6 @@ def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
 )
 
 
-RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
-
-
-def validate_resampler_func(method: str, args, kwargs) -> None:
-    """
-    'args' and 'kwargs' should be empty because all of their necessary
-    parameters are explicitly listed in the function signature
-    """
-    if len(args) + len(kwargs) > 0:
-        if method in RESAMPLER_NUMPY_OPS:
-            raise UnsupportedFunctionCall(
-                "numpy operations are not valid with resample. "
-                f"Use .resample(...).{method}() instead"
-            )
-        raise TypeError("too many arguments passed in")
-
-
 def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
     """
     Ensure that the axis argument passed to min, max, argmin, or argmax is zero
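
The removed validate_resampler_func was written to guard resampler reductions against numpy-style extra arguments. A small sketch of the behavior its body implemented, as a hypothetical direct call:

    from pandas.errors import UnsupportedFunctionCall

    try:
        validate_resampler_func("mean", args=(0,), kwargs={})
    except UnsupportedFunctionCall as err:
        # numpy operations are not valid with resample.
        # Use .resample(...).mean() instead
        print(err)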
pandas/core/internals/blocks.py (0 additions, 19 deletions)
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-from functools import wraps
 import inspect
 import re
 from typing import (
@@ -31,7 +30,6 @@
     AxisInt,
     DtypeBackend,
     DtypeObj,
-    F,
     FillnaOptions,
     IgnoreRaise,
     InterpolateOptions,
@@ -131,23 +129,6 @@
 _dtype_obj = np.dtype("object")
 
 
-def maybe_split(meth: F) -> F:
-    """
-    If we have a multi-column block, split and operate block-wise. Otherwise
-    use the original method.
-    """
-
-    @wraps(meth)
-    def newfunc(self, *args, **kwargs) -> list[Block]:
-        if self.ndim == 1 or self.shape[0] == 1:
-            return meth(self, *args, **kwargs)
-        else:
-            # Split and operate column-by-column
-            return self.split_and_operate(meth, *args, **kwargs)
-
-    return cast(F, newfunc)
-
-
 class Block(PandasObject, libinternals.Block):
     """
     Canonical n-dimensional unit of homogeneous dtype contained in a pandas
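
The removed maybe_split decorator captured a recurring internals pattern: call a method directly when a block holds a single column, otherwise split into single-column pieces and re-apply. A standalone sketch of the same dispatch idea with a toy class in place of pandas' Block (all names here are illustrative):

    from functools import wraps

    import numpy as np

    def maybe_split(meth):
        @wraps(meth)
        def newfunc(self, *args, **kwargs):
            if self.values.ndim == 1 or self.values.shape[0] == 1:
                return meth(self, *args, **kwargs)
            # multi-column: apply per column and concatenate the results
            return [
                res
                for row in self.values
                for res in meth(ToyBlock(row[None, :]), *args, **kwargs)
            ]
        return newfunc

    class ToyBlock:
        def __init__(self, values: np.ndarray) -> None:
            self.values = values  # shape: (n_columns, n_values)

        @maybe_split
        def col_sums(self) -> list[float]:
            return [float(self.values.sum())]

    print(ToyBlock(np.array([[1.0, 2.0], [3.0, 4.0]])).col_sums())  # [3.0, 7.0]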
pandas/core/methods/describe.py (0 additions, 49 deletions)
@@ -18,7 +18,6 @@
 
 import numpy as np
 
-from pandas._libs.tslibs import Timestamp
 from pandas._typing import (
     DtypeObj,
     NDFrameT,
@@ -288,54 +287,6 @@ def describe_categorical_1d(
     return Series(result, index=names, name=data.name, dtype=dtype)
 
 
-def describe_timestamp_as_categorical_1d(
-    data: Series,
-    percentiles_ignored: Sequence[float],
-) -> Series:
-    """Describe series containing timestamp data treated as categorical.
-
-    Parameters
-    ----------
-    data : Series
-        Series to be described.
-    percentiles_ignored : list-like of numbers
-        Ignored, but in place to unify interface.
-    """
-    names = ["count", "unique"]
-    objcounts = data.value_counts()
-    count_unique = len(objcounts[objcounts != 0])
-    result: list[float | Timestamp] = [data.count(), count_unique]
-    dtype = None
-    if count_unique > 0:
-        top, freq = objcounts.index[0], objcounts.iloc[0]
-        tz = data.dt.tz
-        asint = data.dropna().values.view("i8")
-        top = Timestamp(top)
-        if top.tzinfo is not None and tz is not None:
-            # Don't tz_localize(None) if key is already tz-aware
-            top = top.tz_convert(tz)
-        else:
-            top = top.tz_localize(tz)
-        names += ["top", "freq", "first", "last"]
-        result += [
-            top,
-            freq,
-            Timestamp(asint.min(), tz=tz),
-            Timestamp(asint.max(), tz=tz),
-        ]
-
-    # If the DataFrame is empty, set 'top' and 'freq' to None
-    # to maintain output shape consistency
-    else:
-        names += ["top", "freq"]
-        result += [np.nan, np.nan]
-        dtype = "object"
-
-    from pandas import Series
-
-    return Series(result, index=names, name=data.name, dtype=dtype)
-
-
 def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
     """Describe series containing datetime64 dtype.
pandas/core/sorting.py (0 additions, 20 deletions)
@@ -2,11 +2,9 @@
 
 from __future__ import annotations
 
-from collections import defaultdict
 from typing import (
     TYPE_CHECKING,
     Callable,
-    DefaultDict,
     cast,
 )
 
@@ -34,7 +32,6 @@
 if TYPE_CHECKING:
     from collections.abc import (
         Hashable,
-        Iterable,
         Sequence,
     )
 
@@ -592,23 +589,6 @@ def ensure_key_mapped(
     return result
 
 
-def get_flattened_list(
-    comp_ids: npt.NDArray[np.intp],
-    ngroups: int,
-    levels: Iterable[Index],
-    labels: Iterable[np.ndarray],
-) -> list[tuple]:
-    """Map compressed group id -> key tuple."""
-    comp_ids = comp_ids.astype(np.int64, copy=False)
-    arrays: DefaultDict[int, list[int]] = defaultdict(list)
-    for labs, level in zip(labels, levels):
-        table = hashtable.Int64HashTable(ngroups)
-        table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False))
-        for i in range(ngroups):
-            arrays[i].append(level[table.get_item(i)])
-    return [tuple(array) for array in arrays.values()]
-
-
 def get_indexer_dict(
     label_list: list[np.ndarray], keys: list[Index]
 ) -> dict[Hashable, npt.NDArray[np.intp]]:
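
The removed get_flattened_list recovered, for each compressed group id, the tuple of group keys across all grouping levels. A pure-Python sketch of the same mapping without the Int64HashTable machinery (all inputs hypothetical):

    import numpy as np

    comp_ids = np.array([0, 0, 1, 2], dtype=np.intp)  # group id of each row
    labels = [np.array([0, 0, 0, 1]), np.array([2, 2, 0, 1])]  # codes per level
    levels = [np.array(["a", "b"]), np.array([10, 20, 30])]  # keys per level
    ngroups = 3

    # For each group id, find a row carrying it, then read off that row's
    # key in every level: equivalent to what the removed helper returned.
    row_of = {gid: i for i, gid in enumerate(comp_ids)}
    flattened = [
        tuple(level[labs[row_of[gid]]] for level, labs in zip(levels, labels))
        for gid in range(ngroups)
    ]
    # flattened == [("a", 30), ("a", 10), ("b", 20)]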
