Skip to content

Commit

Permalink
remove deprecated write_df()
Browse files Browse the repository at this point in the history
was slated for removal 2 years ago in v0.2.8 on 2022-06-29
ruff unignore FBT001 FBT002 DTZ005 PLR ARG001 and fix existing errors
  • Loading branch information
janosh committed Jun 9, 2024
1 parent 793917c commit f90ba98
Show file tree
Hide file tree
Showing 9 changed files with 32 additions and 39 deletions.
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,19 @@ default_install_hook_types: [pre-commit, commit-msg]

repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.4
rev: v0.4.8
hooks:
- id: ruff
args: [--fix]
- id: ruff-format

- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.9.0
rev: v1.10.0
hooks:
- id: mypy

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: v4.6.0
hooks:
- id: check-case-conflict
- id: check-symlinks
Expand All @@ -31,7 +31,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
rev: v2.3.0
hooks:
- id: codespell
stages: [commit, commit-msg]
Expand Down
18 changes: 8 additions & 10 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -69,27 +69,25 @@ include = ["**/pyproject.toml", "*.ipynb", "*.py", "*.pyi"]
[tool.ruff.lint]
select = ["ALL"]
ignore = [
"ANN101", # Missing type annotation for self in method
"ANN101", # Missing type annotation for self in method
"ANN401",
"ARG001",
"C901",
"COM812",
"D100", # Missing docstring in public module
"D205", # 1 blank line required between summary line and description
"DTZ005",
"D100", # Missing docstring in public module
"D205", # 1 blank line required between summary line and description
"EM101",
"EM102",
"FBT001",
"FBT002",
"PLR", # pylint refactor
"PT006", # pytest-parametrize-names-wrong-type
"PLR0912",
"PLR0913",
"PLR0915",
"PT006", # pytest-parametrize-names-wrong-type
"PTH",
"T201",
"TRY003",
]
pydocstyle.convention = "google"

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["D103", "D104", "INP001", "S101"]
"tests/*" = ["D103", "D104", "FBT001", "INP001", "S101"]
"__init__.py" = ["F401"]
"examples/*" = ["D102", "D103", "D107", "E402", "FA102"]
2 changes: 1 addition & 1 deletion tensorboard_reducer/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from tensorboard_reducer.load import load_tb_events
from tensorboard_reducer.main import main
from tensorboard_reducer.reduce import reduce_events
from tensorboard_reducer.write import write_data_file, write_df, write_tb_events
from tensorboard_reducer.write import write_data_file, write_tb_events

try: # noqa: SIM105
__version__ = version("tensorboard-reducer")
Expand Down
5 changes: 4 additions & 1 deletion tensorboard_reducer/load.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

def load_tb_events(
input_dirs: list[str],
*,
strict_tags: bool = True,
strict_steps: bool = True,
handle_dup_steps: HandleDupSteps | None = None,
Expand Down Expand Up @@ -192,7 +193,9 @@ def load_tb_events(
for tag in list(out_dict)[:50]:
df_scalar = out_dict[tag]
print(f"- '{tag}': {df_scalar.shape}")
if len(out_dict) > 50:

max_tags_to_print = 50
if len(out_dict) > max_tags_to_print:
print("...")

return out_dict
5 changes: 3 additions & 2 deletions tensorboard_reducer/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,10 +117,11 @@ def main(argv: list[str] | None = None) -> int:

reduced_events = reduce_events(events_dict, reduce_ops, verbose=args.verbose)

common_kwds = {"overwrite": overwrite, "verbose": args.verbose}
if out_path.endswith(".csv"):
write_data_file(reduced_events, out_path, overwrite, verbose=args.verbose)
write_data_file(reduced_events, out_path, **common_kwds)
else:
write_tb_events(reduced_events, out_path, overwrite, verbose=args.verbose)
write_tb_events(reduced_events, out_path, **common_kwds)
return 0


Expand Down
1 change: 1 addition & 0 deletions tensorboard_reducer/reduce.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
def reduce_events(
events_dict: dict[str, pd.DataFrame],
reduce_ops: Sequence[str],
*,
verbose: bool = False,
) -> dict[str, dict[str, pd.DataFrame]]:
"""Perform numpy reduce operations along the last dimension of each array in a
Expand Down
18 changes: 6 additions & 12 deletions tensorboard_reducer/write.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,14 @@

import os
import sys
from typing import Any

import pandas as pd
from tqdm import tqdm

_known_extensions = (".csv", ".json", ".xlsx")


def _rm_rf_or_raise(path: str, overwrite: bool) -> None:
def _rm_rf_or_raise(path: str, *, overwrite: bool) -> None:
"""Remove the directory tree below dir if overwrite is True.
Args:
Expand Down Expand Up @@ -48,6 +47,7 @@ def _rm_rf_or_raise(path: str, overwrite: bool) -> None:
def write_tb_events(
data_to_write: dict[str, dict[str, pd.DataFrame]],
out_dir: str,
*,
overwrite: bool = False,
verbose: bool = False,
) -> list[str]:
Expand Down Expand Up @@ -97,7 +97,7 @@ def write_tb_events(
if verbose:
print(f"Writing mean{symbol}std reduction to disk...", file=sys.stderr)

_rm_rf_or_raise(std_out_dir, overwrite)
_rm_rf_or_raise(std_out_dir, overwrite=overwrite)
out_dirs.append(std_out_dir)

writer = SummaryWriter(std_out_dir)
Expand All @@ -116,7 +116,7 @@ def write_tb_events(
op_out_dir = f"{out_dir}{out_dir_op_connector}{op}"
out_dirs.append(op_out_dir)

_rm_rf_or_raise(op_out_dir, overwrite)
_rm_rf_or_raise(op_out_dir, overwrite=overwrite)

writer = SummaryWriter(op_out_dir)

Expand All @@ -136,16 +136,10 @@ def write_tb_events(
return out_dirs


def write_df(*args: Any, **kwargs: Any) -> None:
    """Deprecated predecessor of write_data_file(); exists only to fail loudly.

    Accepts (and ignores) any positional and keyword arguments so that every
    caller of the pre-v0.2.8 API — including ones that used keyword arguments
    such as ``overwrite=True`` — receives the informative error below rather
    than a confusing TypeError about unexpected keyword arguments.

    Raises:
        NotImplementedError: Always, directing users to write_data_file().
    """
    raise NotImplementedError(
        "write_df() was renamed to write_data_file() in tensorboard-reducer v0.2.8"
    )


def write_data_file(
data_to_write: dict[str, dict[str, pd.DataFrame]],
out_path: str,
*,
overwrite: bool = False,
verbose: bool = False,
) -> str:
Expand All @@ -168,7 +162,7 @@ def write_data_file(
Returns:
str: Path to the new data file.
"""
_rm_rf_or_raise(out_path, overwrite)
_rm_rf_or_raise(out_path, overwrite=overwrite)

# create multi-index dataframe from event data with reduce op names as 1st-level col
# names and tag names as 2nd level
Expand Down
9 changes: 5 additions & 4 deletions tests/test_load.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

@pytest.mark.parametrize("verbose", [True, False])
def test_load_tb_events_strict(
verbose: bool, capsys: pytest.CaptureFixture[str]
*, verbose: bool, capsys: pytest.CaptureFixture[str]
) -> None:
"""Test load_tb_events for strict input data, i.e. without any of the special cases
below.
Expand All @@ -32,13 +32,14 @@ def test_load_tb_events_strict(
assert actual_keys == ["strict/foo"], assert_keys

n_steps, n_runs = events_dict["strict/foo"].shape
n_expected = 100
assert_len = (
f"load_tb_events() returned TB event with {n_steps} steps, expected 100"
f"load_tb_events() returned TB event with {n_steps} steps, {n_expected=}"
)
assert n_steps == 100, assert_len
assert n_steps == n_expected, assert_len

assert_len = f"load_tb_events() returned {n_runs} TB runs, expected 3"
assert n_steps == 100, assert_len
assert n_steps == n_expected, assert_len

# columns correspond to different runs for the same tag, the mean across a run is
# meaningless and only used for asserting value constancy
Expand Down
5 changes: 0 additions & 5 deletions tests/test_write.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,3 @@ def test_write_data_file_with_bad_ext(
) -> None:
with pytest.raises(ValueError, match="has unknown extension, should be one of"):
tbr.write_data_file(reduced_events, "foo.bad_ext")


def test_write_df() -> None:
    """Calling the removed write_df() raises NotImplementedError whose message
    points users at its replacement, write_data_file()."""
    with pytest.raises(NotImplementedError, match=r"write_df\(\) was renamed"):
        tbr.write_df(None, "foo.csv")

0 comments on commit f90ba98

Please sign in to comment.