Skip to content

Commit

Permalink
chore(test-perf): use pytest-benchmark to improve reliability (#1250)
Browse files Browse the repository at this point in the history
  • Loading branch information
heitorlessa authored Jun 17, 2022
1 parent 21e1c5c commit 5fbff55
Show file tree
Hide file tree
Showing 4 changed files with 85 additions and 106 deletions.
70 changes: 33 additions & 37 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ mypy-boto3-secretsmanager = "^1.24.0"
mypy-boto3-ssm = "^1.24.0"
mypy-boto3-appconfig = "^1.24.0"
mypy-boto3-dynamodb = "^1.24.0"
pytest-benchmark = "^3.4.1"


[tool.poetry.extras]
Expand Down
18 changes: 0 additions & 18 deletions tests/performance/conftest.py

This file was deleted.

102 changes: 51 additions & 51 deletions tests/performance/test_high_level_imports.py
Original file line number Diff line number Diff line change
@@ -1,95 +1,95 @@
import importlib
import time
from contextlib import contextmanager
from types import ModuleType
from typing import Generator, Tuple

import pytest

# SLA ceilings (seconds) for each benchmark. Values include headroom for
# slower CI machines so tests don't need re-runs (which mask regressions).
LOGGER_INIT_SLA: float = 0.005
METRICS_INIT_SLA: float = 0.005
TRACER_INIT_SLA: float = 0.5
IMPORT_INIT_SLA: float = 0.035

# Dotted import paths used with importlib so import cost is measured per run.
PARENT_PACKAGE = "aws_lambda_powertools"
TRACING_PACKAGE = "aws_lambda_powertools.tracing"
LOGGING_PACKAGE = "aws_lambda_powertools.logging"
METRICS_PACKAGE = "aws_lambda_powertools.metrics"


def import_core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]:
    """Dynamically import and return the Tracing, Logging, and Metrics modules.

    Uses importlib (not top-level imports) so each benchmark iteration pays
    the import cost being measured, instead of reusing an already-imported
    module object.
    """
    return (
        importlib.import_module(TRACING_PACKAGE),
        importlib.import_module(LOGGING_PACKAGE),
        importlib.import_module(METRICS_PACKAGE),
    )
@pytest.fixture(autouse=True)
def clear_cache():
    # Invalidate importlib's finder caches before every test so the dynamic
    # imports in each benchmark exercise real import machinery rather than
    # stale cached finder results.
    importlib.invalidate_caches()


def import_init_tracer():
    """Import the tracing module and initialize a Tracer.

    Tracer is created with ``disabled=True``; per the original comments,
    boto3 takes ~200ms and the remainder is X-Ray SDK initialization.
    """
    tracing = importlib.import_module(TRACING_PACKAGE)
    tracing.Tracer(disabled=True)

def import_init_metrics():
    """Import the metrics module and instantiate a default Metrics object."""
    module = importlib.import_module(METRICS_PACKAGE)
    module.Metrics()


def import_init_logger():
    """Import the logging module and instantiate a default Logger."""
    module = importlib.import_module(LOGGING_PACKAGE)
    module.Logger()


@pytest.mark.perf
@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False)
def test_import_times_ceiling(benchmark):
    # GIVEN Core utilities are imported
    # WHEN none are used
    # THEN import and any global initialization perf should be below 30ms
    # though we adjust to 35ms to take into account different CI machines, etc.
    # instead of re-running tests which can lead to false positives
    benchmark.pedantic(import_core_utilities)

    # Gate on the worst observed round so a single slow import fails the SLA.
    stat = benchmark.stats.stats.max
    if stat > IMPORT_INIT_SLA:
        pytest.fail(f"High level imports should be below {IMPORT_INIT_SLA}s: {stat}")


@pytest.mark.perf
@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False)
def test_tracer_init(benchmark):
    # GIVEN Tracer is initialized
    # WHEN default options are used
    # THEN initialization X-Ray SDK perf should be below 450ms
    # though we adjust to 500ms to take into account different CI machines, etc.
    # instead of re-running tests which can lead to false positives
    benchmark.pedantic(import_init_tracer)

    # Gate on the worst observed round so a single slow init fails the SLA.
    stat = benchmark.stats.stats.max
    if stat > TRACER_INIT_SLA:
        pytest.fail(f"High level imports should be below {TRACER_INIT_SLA}s: {stat}")


@pytest.mark.perf
@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False)
def test_metrics_init(benchmark):
    # GIVEN Metrics is initialized
    # WHEN default options are used
    # THEN initialization perf should be below 5ms
    benchmark.pedantic(import_init_metrics)

    # Gate on the worst observed round so a single slow init fails the SLA.
    stat = benchmark.stats.stats.max
    if stat > METRICS_INIT_SLA:
        # Fixed: stray "$" before the interpolation (JS-template leftover)
        # was printed verbatim in the failure message.
        pytest.fail(f"High level imports should be below {METRICS_INIT_SLA}s: {stat}")


@pytest.mark.perf
@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False)
def test_logger_init(benchmark):
    # GIVEN Logger is initialized
    # WHEN default options are used
    # THEN initialization perf should be below 5ms
    benchmark.pedantic(import_init_logger)

    # Gate on the worst observed round so a single slow init fails the SLA.
    stat = benchmark.stats.stats.max
    if stat > LOGGER_INIT_SLA:
        # Fixed: stray "$" before the interpolation (JS-template leftover)
        # was printed verbatim in the failure message.
        pytest.fail(f"High level imports should be below {LOGGER_INIT_SLA}s: {stat}")

0 comments on commit 5fbff55

Please sign in to comment.