Commit 5ac2bfb

Repair and refine performance benchmarks

cjdsellers committed Jan 5, 2024
1 parent f1e80d2 commit 5ac2bfb
Showing 3 changed files with 128 additions and 144 deletions.
11 changes: 9 additions & 2 deletions tests/performance_tests/conftest.py
@@ -15,13 +15,20 @@

 import pytest

-from nautilus_trader.common.clock import TestClock
+from nautilus_trader.common.clock import LiveClock
 from nautilus_trader.common.logging import Logger


+@pytest.fixture(autouse=True)
+def setup_benchmark(benchmark):
+    benchmark.extra_info["disable_gc"] = True
+    benchmark.extra_info["warmup"] = True
+    return benchmark
+
+
 @pytest.fixture()
 def clock():
-    return TestClock()
+    return LiveClock()


 @pytest.fixture()
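Note: pytest-benchmark's extra_info is a plain dict whose entries are recorded in the saved benchmark report, so the autouse setup_benchmark fixture above tags every benchmark in this package with the same metadata. A minimal sketch of how a test composes with that fixture (the test itself is hypothetical and not part of this commit; only the benchmark fixture and extra_info come from pytest-benchmark):

# test_example.py -- illustrative sketch only
from typing import Any


def test_sum_benchmark(benchmark: Any) -> None:
    # The autouse setup_benchmark fixture has already populated
    # benchmark.extra_info["disable_gc"] and benchmark.extra_info["warmup"].
    result = benchmark(sum, range(1_000))  # times repeated calls to sum(range(1_000))
    assert result == 499_500  # benchmark() returns the target's return value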
143 changes: 60 additions & 83 deletions tests/performance_tests/test_perf_catalog.py
@@ -14,114 +14,91 @@
 # -------------------------------------------------------------------------------------------------

 import os
-import shutil
-import tempfile
+from pathlib import Path
+from typing import Any

 import pytest

 from nautilus_trader import PACKAGE_ROOT
 from nautilus_trader.core.nautilus_pyo3 import DataBackendSession
 from nautilus_trader.core.nautilus_pyo3 import NautilusDataType
 from nautilus_trader.model.data import capsule_to_list
-from nautilus_trader.test_kit.mocks.data import data_catalog_setup
-from nautilus_trader.test_kit.performance import PerformanceHarness
-from tests.unit_tests.persistence.test_catalog import TestPersistenceCatalog
-
-
-# TODO: skip in CI
-pytestmark = pytest.mark.skip(reason="Repair order book parsing")
-
-
-@pytest.mark.skip(reason="update tests for new API")
-class TestCatalogPerformance(PerformanceHarness):
-    @staticmethod
-    def test_load_quote_ticks_python(benchmark):
-        tempdir = tempfile.mkdtemp()
-
-        def setup():
-            # Arrange
-            cls = TestPersistenceCatalog()
-
-            cls.catalog = data_catalog_setup(protocol="file", path=tempdir)
-
-            cls._load_quote_ticks_into_catalog()
-
-            # Act
-            return (cls.catalog,), {}
-
-        def run(catalog):
-            quotes = catalog.quote_ticks()
-            assert len(quotes) == 9500
-
-        benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
-        shutil.rmtree(tempdir)
-
-    @staticmethod
-    def test_load_quote_ticks_rust(benchmark):
-        tempdir = tempfile.mkdtemp()
-
-        def setup():
-            # Arrange
-            cls = TestPersistenceCatalog()
-
-            cls.catalog = data_catalog_setup(protocol="file", path=tempdir)
-
-            cls._load_quote_ticks_into_catalog()
-
-            # Act
-            return (cls.catalog,), {}
-
-        def run(catalog):
-            quotes = catalog.quote_ticks()
-            assert len(quotes) == 9500
-
-        benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
-        shutil.rmtree(tempdir)
-
-    @staticmethod
-    def test_load_single_stream_catalog_v2(benchmark):
-        def setup():
-            file_path = os.path.join(PACKAGE_ROOT, "bench_data/quotes_0005.parquet")
-            session = DataBackendSession()
-            session.add_file("quote_ticks", file_path, NautilusDataType.QuoteTick)
-            return (session.to_query_result(),), {}
-
-        def run(result):
-            count = 0
-            for chunk in result:
-                count += len(capsule_to_list(chunk))
-
-            assert count == 9689614
-
-        benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
-
-    @staticmethod
-    def test_load_multi_stream_catalog_v2(benchmark):
-        def setup():
-            dir_path = os.path.join(PACKAGE_ROOT, "bench_data/multi_stream_data/")
-
-            session = DataBackendSession()
-
-            for dirpath, _, filenames in os.walk(dir_path):
-                for filename in filenames:
-                    if filename.endswith("parquet"):
-                        file_stem = os.path.splitext(filename)[0]
-                        if "quotes" in filename:
-                            full_path = os.path.join(dirpath, filename)
-                            session.add_file(file_stem, full_path, NautilusDataType.QuoteTick)
-                        elif "trades" in filename:
-                            full_path = os.path.join(dirpath, filename)
-                            session.add_file(file_stem, full_path, NautilusDataType.TradeTick)
-
-            return (session.to_query_result(),), {}
-
-        def run(result):
-            count = 0
-            for chunk in result:
-                ticks = capsule_to_list(chunk)
-                count += len(ticks)
-
-            # Check total count is correct
-            assert count == 72_536_038
-
-        benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
+from nautilus_trader.test_kit.mocks.data import load_catalog_with_stub_quote_ticks_audusd
+from nautilus_trader.test_kit.mocks.data import setup_catalog
+
+
+@pytest.mark.skip("Too slow (currently WIP)")
+def test_write_quote_ticks(benchmark: Any) -> None:
+    catalog = setup_catalog("file")
+
+    def run():
+        load_catalog_with_stub_quote_ticks_audusd(catalog)
+        quotes = catalog.quote_ticks()
+        assert len(quotes) == 100_000
+
+    benchmark.pedantic(run, rounds=10, iterations=1, warmup_rounds=1)
+
+
+def test_load_quote_ticks(benchmark: Any) -> None:
+    catalog = setup_catalog("file")
+    load_catalog_with_stub_quote_ticks_audusd(catalog)
+
+    def run():
+        quotes = catalog.quote_ticks()
+        assert len(quotes) == 100_000
+
+    benchmark.pedantic(run, rounds=10, iterations=1, warmup_rounds=1)
+
+
+@pytest.mark.skip("Development use")
+def test_load_single_stream(benchmark: Any) -> None:
+    def setup():
+        file_path = Path(PACKAGE_ROOT) / "bench_data" / "quotes_0005.parquet"
+        session = DataBackendSession()
+        session.add_file(
+            NautilusDataType.QuoteTick,
+            "quote_ticks",
+            str(file_path),
+        )
+        return (session.to_query_result(),), {}
+
+    def run(result):
+        count = 0
+        for chunk in result:
+            count += len(capsule_to_list(chunk))
+
+        assert count == 9689614
+
+    benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
+
+
+@pytest.mark.skip("Development use")
+def test_load_multi_stream_catalog_v2(benchmark: Any) -> None:
+    def setup():
+        dir_path = os.path.join(PACKAGE_ROOT, "bench_data/multi_stream_data/")
+
+        session = DataBackendSession()
+
+        for dirpath, _, filenames in os.walk(dir_path):
+            for filename in filenames:
+                if filename.endswith("parquet"):
+                    file_stem = os.path.splitext(filename)[0]
+                    if "quotes" in filename:
+                        full_path = os.path.join(dirpath, filename)
+                        session.add_file(NautilusDataType.QuoteTick, file_stem, full_path)
+                    elif "trades" in filename:
+                        full_path = os.path.join(dirpath, filename)
+                        session.add_file(NautilusDataType.TradeTick, file_stem, full_path)
+
+        return (session.to_query_result(),), {}
+
+    def run(result):
+        count = 0
+        for chunk in result:
+            ticks = capsule_to_list(chunk)
+            count += len(ticks)
+
+        # Check total count is correct
+        assert count == 72_536_038
+
+    benchmark.pedantic(run, setup=setup, rounds=1, iterations=1, warmup_rounds=1)
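For reference, when benchmark.pedantic is given a setup callable, setup runs untimed before each round and must return an (args, kwargs) pair that is forwarded to the target, which is why the stream tests above return (session.to_query_result(),), {}. pytest-benchmark also requires iterations=1 whenever setup is used. A minimal sketch of the same pattern with a hypothetical target (test_pedantic_setup_pattern and consume are illustrative names, not part of this commit):

# illustrative sketch only
from typing import Any


def test_pedantic_setup_pattern(benchmark: Any) -> None:
    def setup():
        payload = list(range(10_000))  # fresh, untimed state for each round
        return (payload,), {}          # (args, kwargs) forwarded to the target

    def consume(payload):
        # Only this body is timed
        assert sum(payload) == 49_995_000

    benchmark.pedantic(consume, setup=setup, rounds=5, iterations=1, warmup_rounds=1)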
118 changes: 59 additions & 59 deletions tests/performance_tests/test_perf_clock.py
@@ -14,68 +14,68 @@
 # -------------------------------------------------------------------------------------------------

 from datetime import timedelta
+from typing import Any

 from nautilus_trader.common.clock import LiveClock
 from nautilus_trader.common.clock import TestClock
-from nautilus_trader.test_kit.performance import PerformanceHarness
+from nautilus_trader.common.clock import TimeEvent


-live_clock = LiveClock()
-test_clock = TestClock()
-
-
-class TestLiveClockPerformance(PerformanceHarness):
-    def test_utc_now(self):
-        self.benchmark.pedantic(
-            target=live_clock.timestamp_ns,
-            iterations=100_000,
-            rounds=1,
-        )
-        # ~0.0ms / ~1.3μs / 1330ns minimum of 100,000 runs @ 1 iteration each run.
-
-    def test_unix_timestamp(self):
-        self.benchmark.pedantic(
-            target=live_clock.timestamp,
-            iterations=100_000,
-            rounds=1,
-        )
-        # ~0.0ms / ~0.1μs / 101ns minimum of 100,000 runs @ 1 iteration each run.
-
-    def test_unix_timestamp_ns(self):
-        self.benchmark.pedantic(
-            target=live_clock.timestamp_ns,
-            iterations=100_000,
-            rounds=1,
-        )
-        # ~0.0ms / ~0.1μs / 101ns minimum of 100,000 runs @ 1 iteration each run.
-
-
-class TestClockHarness:
-    @staticmethod
-    def iteratively_advance_time():
-        test_time = 0
-        for _ in range(100000):
-            test_time += 1
-            test_clock.advance_time(to_time_ns=test_time)
-
-
-class TestClockPerformanceTests(PerformanceHarness):
-    def test_advance_time(self):
-        self.benchmark.pedantic(
-            target=test_clock.advance_time,
-            args=(0,),
-            iterations=100_000,
-            rounds=1,
-        )
-        # ~0.0ms / ~0.2μs / 175ns minimum of 100,000 runs @ 1 iteration each run.
-
-    def test_iteratively_advance_time(self):
-        store = []
-        test_clock.set_timer("test", timedelta(seconds=1), callback=store.append)
-        self.benchmark.pedantic(
-            target=TestClockHarness.iteratively_advance_time,
-            iterations=1,
-            rounds=1,
-        )
-        # ~320.1ms minimum of 1 runs @ 1 iteration each run. (100000 advances)
-        # ~3.7ms / ~3655.1μs / 3655108ns minimum of 1 runs @ 1 iteration each run.
+_LIVE_CLOCK = LiveClock()
+_TEST_CLOCK = TestClock()
+
+
+def test_live_clock_utc_now(benchmark: Any) -> None:
+    benchmark.pedantic(
+        target=_LIVE_CLOCK.timestamp_ns,
+        iterations=100_000,
+        rounds=1,
+    )
+    # ~0.0ms / ~1.3μs / 1330ns minimum of 100,000 runs @ 1 iteration each run.
+
+
+def test_live_clock_unix_timestamp(benchmark: Any) -> None:
+    benchmark.pedantic(
+        target=_LIVE_CLOCK.timestamp,
+        iterations=100_000,
+        rounds=1,
+    )
+    # ~0.0ms / ~0.1μs / 101ns minimum of 100,000 runs @ 1 iteration each run.
+
+
+def test_live_clock_timestamp_ns(benchmark: Any) -> None:
+    benchmark.pedantic(
+        target=_LIVE_CLOCK.timestamp_ns,
+        iterations=100_000,
+        rounds=1,
+    )
+    # ~0.0ms / ~0.1μs / 101ns minimum of 100,000 runs @ 1 iteration each run.
+
+
+def test_advance_time(benchmark: Any) -> None:
+    benchmark.pedantic(
+        target=_TEST_CLOCK.advance_time,
+        args=(0,),
+        iterations=100_000,
+        rounds=1,
+    )
+    # ~0.0ms / ~0.2μs / 175ns minimum of 100,000 runs @ 1 iteration each run.
+
+
+def test_iteratively_advance_time(benchmark: Any) -> None:
+    store: list[TimeEvent] = []
+    _TEST_CLOCK.set_timer("test", timedelta(seconds=1), callback=store.append)
+
+    def _iteratively_advance_time():
+        test_time = 0
+        for _ in range(100000):
+            test_time += 1
+            _TEST_CLOCK.advance_time(to_time_ns=test_time)
+
+    benchmark.pedantic(
+        target=_iteratively_advance_time,
+        iterations=1,
+        rounds=1,
+    )
+    # ~320.1ms minimum of 1 runs @ 1 iteration each run. (100000 advances)
+    # ~3.7ms / ~3655.1μs / 3655108ns minimum of 1 runs @ 1 iteration each run.
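With the PerformanceHarness classes gone, these are plain module-level pytest functions, so the suite can be driven directly by pytest-benchmark. A typical invocation (assuming pytest-benchmark is installed; the flags shown are standard pytest-benchmark options, not defined by this commit) might be:

pytest tests/performance_tests/test_perf_clock.py --benchmark-disable-gc --benchmark-warmup=on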
