diff --git a/Makefile b/Makefile
index 141eaa8..53ebc79 100644
--- a/Makefile
+++ b/Makefile
@@ -18,14 +18,12 @@ install: nightly
 
 .PHONY: test
 test:
-	# Run tests outside of project folder.
-	# See https://github.com/PyO3/pyo3/issues/105
-	cd .. && pytest --verbose --capture=no $(DIR)
+	pytest tests
 
 .PHONY: bench
 bench:
-	python3 benchmark/benchmark.py skip-lib-comps
+	pytest benchmarks
 
 .PHONY: bench-all
 bench-all:
-	python3 benchmark/benchmark.py
\ No newline at end of file
+	pytest benchmarks --compare
\ No newline at end of file
diff --git a/benchmark/benchmark.py b/benchmarks/benchmark_ujson.py
similarity index 100%
rename from benchmark/benchmark.py
rename to benchmarks/benchmark_ujson.py
diff --git a/benchmark/dict_string_int_plain.txt b/benchmarks/dict_string_int_plain.txt
similarity index 100%
rename from benchmark/dict_string_int_plain.txt
rename to benchmarks/dict_string_int_plain.txt
diff --git a/benchmark/sample.json b/benchmarks/sample.json
similarity index 100%
rename from benchmark/sample.json
rename to benchmarks/sample.json
diff --git a/benchmarks/test_benchmark.py b/benchmarks/test_benchmark.py
new file mode 100644
index 0000000..216f31d
--- /dev/null
+++ b/benchmarks/test_benchmark.py
@@ -0,0 +1,81 @@
+import sys
+import random
+import pytest
+
+benchmark = pytest.importorskip('pytest_benchmark')
+
+doubles = []
+numbers = []
+unicode_strings = []
+strings = []
+booleans = []
+list_dicts = []
+dict_lists = {}
+
+composite_object = {
+    'words': """
+        Lorem ipsum dolor sit amet, consectetur adipiscing
+        elit. Mauris adipiscing adipiscing placerat.
+        Vestibulum augue augue,
+        pellentesque quis sollicitudin id, adipiscing.
+    """,
+    'list': list(range(200)),
+    'dict': dict((str(i), 'a') for i in list(range(200))),
+    'int': 100100100,
+    'float': 100999.123456
+}
+
+doubles = [sys.maxsize * random.random() for _ in range(256)]
+unicode_strings = [
+    "نظام الحكم سلطاني وراثي في الذكور من ذرية السيد تركي بن سعيد بن سلطان ويشترط فيمن يختار لولاية الحكم من بينهم ان يكون مسلما رشيدا عاقلا ًوابنا شرعيا لابوين عمانيين " for _ in range(256)]
+strings = ["A pretty long string which is in a list"] * 256
+
+booleans = [True] * 256
+
+for y in range(100):
+    arrays = []
+    list_dicts.append({str(random.random()*20): int(random.random()*1000000)})
+
+    for x in range(100):
+        arrays.append({str(random.random() * 20): int(random.random()*1000000)})
+        dict_lists[str(random.random() * 20)] = arrays
+
+user = {
+    "userId": 3381293,
+    "age": 213,
+    "username": "johndoe",
+    "fullname": u"John Doe the Second",
+    "isAuthorized": True,
+    "liked": 31231.31231202,
+    "approval": 31.1471,
+    "jobs": [1, 2],
+    "currJob": None
+}
+
+friends = [user, user, user, user, user, user, user, user]
+complex_object = [
+    [user, friends], [user, friends], [user, friends],
+    [user, friends], [user, friends], [user, friends]
+]
+datasets = [('composite object', composite_object),
+            ('256 doubles array', doubles),
+            ('256 unicode array', unicode_strings),
+            ('256 ascii array', strings),
+            ('256 Trues array', booleans),
+            ('100 dicts array', list_dicts),
+            # ('100 arrays dict', dict_lists),
+            ('complex object', complex_object),
+            ]
+
+
+@pytest.mark.benchmark(group='serialize')
+@pytest.mark.parametrize('data', [d[1] for d in datasets], ids=[d[0] for d in datasets])
+def test_dumps(contender, data, benchmark):
+    benchmark(contender.dumps, data)
+
+
+@pytest.mark.benchmark(group='deserialize')
+@pytest.mark.parametrize('data', [d[1] for d in datasets], ids=[d[0] for d in datasets])
+def test_loads(contender, data, benchmark):
+    data = contender.dumps(data)
+    benchmark(contender.loads, data)
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..d1e5a07
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,61 @@
+# based on https://github.com/lelit/python-rapidjson/blob/master/benchmarks/conftest.py
+import hyperjson
+from collections import namedtuple
+from operator import attrgetter
+
+Contender = namedtuple('Contender', 'name,dumps,loads')
+
+
+def pytest_benchmark_group_stats(config, benchmarks, group_by):
+    result = {}
+    for bench in benchmarks:
+        engine, data_kind = bench['param'].split('-')
+        group = result.setdefault("%s: %s" % (data_kind, bench['group']), [])
+        group.append(bench)
+    return sorted(result.items())
+
+
+def pytest_addoption(parser):
+    parser.addoption('--compare', action='store_true',
+                     help='compare against other JSON engines')
+
+
+contenders = []
+
+contenders.append(Contender('hyperjson',
+                            hyperjson.dumps,
+                            hyperjson.loads))
+try:
+    import simplejson
+except ImportError:
+    pass
+else:
+    contenders.append(Contender('simplejson',
+                                simplejson.dumps,
+                                simplejson.loads))
+try:
+    import ujson
+except ImportError:
+    pass
+else:
+    contenders.append(Contender('ujson',
+                                ujson.dumps,
+                                ujson.loads))
+try:
+    import yajl
+except ImportError:
+    pass
+else:
+    contenders.append(Contender('yajl',
+                                yajl.dumps,
+                                yajl.loads))
+
+
+def pytest_generate_tests(metafunc):
+    if 'contender' in metafunc.fixturenames:
+        if metafunc.config.getoption('compare'):
+            metafunc.parametrize('contender', contenders,
+                                 ids=attrgetter('name'))
+        else:
+            metafunc.parametrize(
+                'contender', contenders[:1], ids=attrgetter('name'))
diff --git a/requirements-test.txt b/requirements-test.txt
index c17d467..3d3764e 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1,2 +1,4 @@
 pytest>=3.5.0
 pytest-runner>=4.2 # add setup.py test support for pytest
+pytest-benchmark>=3.1
+
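
Running the suite (a sketch based on the Makefile targets and the --compare option above; assumes the dependencies from requirements-test.txt, including pytest-benchmark, are installed):

    pytest tests                   # run the test suite (make test)
    pytest benchmarks              # benchmark hyperjson only (make bench)
    pytest benchmarks --compare    # also benchmark simplejson, ujson, and yajl if installed (make bench-all)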