add-time-unit-for-compare #2

Draft: wants to merge 6 commits into master
src/pytest_benchmark/cli.py: 6 changes (5 additions, 1 deletion)
@@ -3,6 +3,7 @@
 
 from _pytest import pathlib
 from _pytest._io import TerminalWriter
+from _pytest.config import Config
 from _pytest.config.findpaths import locate_config
 
 from pytest_benchmark.csv import CSVResults
@@ -148,7 +149,10 @@ def main():
         histogram=first_or_value(args.histogram, False),
         name_format=NAME_FORMATTERS[args.name],
         logger=logger,
-        scale_unit=partial(hook.pytest_benchmark_scale_unit, config=None),
+        scale_unit=partial(
+            hook.pytest_benchmark_scale_unit,
+            config=Config.fromdictargs({"benchmark_time_unit": args.time_unit}, []),
+        ),
     )
     groups = hook.pytest_benchmark_group_stats(
         benchmarks=storage.load_benchmarks(*args.glob_or_file),
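For context on the change above: Config.fromdictargs(...) copies the supplied dict onto the config's option namespace, so whatever implements the pytest_benchmark_scale_unit hook can read the requested unit back via config.getoption('benchmark_time_unit'). The sketch below is not part of this PR; it is a hypothetical hook implementation illustrating that round trip, and it assumes the hook's usual (prefix, multiplier) return convention (e.g. ('m', 1e3) for milliseconds). The plugin's real default implementation may differ.

from _pytest.config import Config


def pytest_benchmark_scale_unit(config, unit, benchmarks, best, worst, sort):
    # The CLI above stores args.time_unit under 'benchmark_time_unit', so it is
    # readable from the option namespace; None or 'auto' means automatic scaling.
    requested = config.getoption('benchmark_time_unit', None) if config else None
    if unit == 'seconds' and requested and requested != 'auto':
        # Assumed return convention: (prefix, multiplier) applied to the raw seconds.
        return {'ns': ('n', 1e9), 'us': ('u', 1e6), 'ms': ('m', 1e3), 's': ('', 1.0)}[requested]
    return None  # defer to the next hook implementation / automatic scaling


# Round-trip check of the Config built by the CLI code above:
cfg = Config.fromdictargs({'benchmark_time_unit': 'ms'}, [])
print(cfg.getoption('benchmark_time_unit'))                            # 'ms'
print(pytest_benchmark_scale_unit(cfg, 'seconds', [], {}, {}, 'min'))  # ('m', 1000.0)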
src/pytest_benchmark/plugin.py: 14 changes (7 additions, 7 deletions)
@@ -85,6 +85,13 @@ def add_display_options(addoption, prefix='benchmark-'):
         default='normal',
         help="How to format names in results. Can be one of 'short', 'normal', 'long', or 'trial'. Default: %(default)r",
     )
+    addoption(
+        f'--{prefix}time-unit',
+        metavar='COLUMN',
+        default=None,
+        choices=['ns', 'us', 'ms', 's', 'auto'],
+        help="Unit to scale the results to. Available units: 'ns', 'us', 'ms', 's'. Default: 'auto'.",
+    )
 
 
 def add_histogram_options(addoption, prefix='benchmark-'):
@@ -298,13 +305,6 @@ def pytest_addoption(parser):
         help='Save cprofile dumps as FILENAME-PREFIX-test_name.prof. If FILENAME-PREFIX contains'
         f" slashes ('/') then directories will be created. Default: {cprofile_dump_prefix!r}",
     )
-    group.addoption(
-        '--benchmark-time-unit',
-        metavar='COLUMN',
-        default=None,
-        choices=['ns', 'us', 'ms', 's', 'auto'],
-        help="Unit to scale the results to. Available units: 'ns', 'us', 'ms', 's'. Default: 'auto'.",
-    )
     add_global_options(group.addoption)
     add_display_options(group.addoption)
     add_histogram_options(group.addoption)
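Because the option now lives in add_display_options rather than directly in pytest_addoption, the same registration code serves both the pytest plugin (--benchmark-time-unit, via the default 'benchmark-' prefix) and the standalone compare CLI (--time-unit). A rough, self-contained illustration of that reuse with plain argparse, assuming the CLI registers the display options with prefix='' (consistent with the --time-unit spelling in the help output tested below):

import argparse


def add_display_options(addoption, prefix='benchmark-'):
    # Same shape as the helper in plugin.py; only the new option is shown here.
    addoption(
        f'--{prefix}time-unit',
        metavar='COLUMN',
        default=None,
        choices=['ns', 'us', 'ms', 's', 'auto'],
        help="Unit to scale the results to.",
    )


# Standalone CLI flavor: no prefix, so the flag is spelled --time-unit.
parser = argparse.ArgumentParser(prog='py.test-benchmark compare')
add_display_options(parser.add_argument, prefix='')
print(parser.parse_args(['--time-unit', 'ms']).time_unit)  # 'ms'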
tests/test_cli.py: 53 changes (53 additions, 0 deletions)
@@ -93,6 +93,7 @@ def test_help_compare(testdir, args):
         [
             'usage: py.test-benchmark compare [-h] [--sort COL] [--group-by LABEL]',
             ' [--columns LABELS] [--name FORMAT]',
+            ' [--time-unit COLUMN]',
             ' [--histogram [FILENAME-PREFIX]]',
             ' [--csv [FILENAME]]',
             ' [[]glob_or_file *[]]',
@@ -116,6 +117,8 @@
             " outliers, ops, rounds, iterations'",
             " --name FORMAT How to format names in results. Can be one of 'short',",
             " 'normal', 'long', or 'trial'. Default: 'normal'",
+            " --time-unit COLUMN Unit to scale the results to. Available units: 'ns',",
+            " 'us', 'ms', 's'. Default: 'auto'.",
             ' --histogram [FILENAME-PREFIX]',
             ' Plot graphs of min/max/avg/stddev over time in',
             ' FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX',
@@ -286,6 +289,56 @@ def test_compare(testdir, name, name_pattern_generator):
     assert result.ret == 0
 
 
+@pytest.mark.parametrize(
+    ('name', 'name_pattern_generator', 'unit'),
+    [
+        ('short', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n, "s"),
+        ('short', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n, "ms"),
+        ('short', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n, "us"),
+        ('short', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n, "ns"),
+    ],
+)
+def test_compare_with_unit_scale(testdir, name, name_pattern_generator, unit):
+    result = testdir.run(
+        'py.test-benchmark',
+        '--storage',
+        STORAGE,
+        'compare',
+        '0001',
+        '0002',
+        '0003',
+        '--sort',
+        'min',
+        '--columns',
+        'min,max',
+        '--name',
+        name,
+        '--histogram',
+        'foobar',
+        '--csv',
+        'foobar',
+        '--time-unit',
+        unit,
+    )
+    result.stderr.fnmatch_lines(['Generated csv: *foobar.csv'])
+    LineMatcher(testdir.tmpdir.join('foobar.csv').readlines(cr=0)).fnmatch_lines(
+        [
+            'name,min,max',
+            'tests/test_normal.py::test_xfast_parametrized[[]0[]],2.15628567*e-07,1.03186158*e-05',
+            'tests/test_normal.py::test_xfast_parametrized[[]0[]],2.16902756*e-07,7.73929968*e-06',
+            'tests/test_normal.py::test_xfast_parametrized[[]0[]],2.17314542*e-07,1.14473891*e-05',
+            '',
+        ]
+    )
+    result.stdout.fnmatch_lines(
+        [
+            '---*--- benchmark: 3 tests ---*---',
+            f'Name (time in {unit}) * Min * Max ',
+        ]
+    )
+    assert result.ret == 0
+
+
 def test_compare_csv(testdir):
     test = testdir.makepyfile("""
 import pytest