diff --git a/Makefile b/Makefile index a555d4447a5..5e2a50ade4a 100644 --- a/Makefile +++ b/Makefile @@ -42,10 +42,10 @@ build-docs-website: dev-docs cd docs && npm run build cp -R docs/public/* dist/ -docs-dev: +docs-local: cd docs && npm run start -docs-api-dev: +docs-api-local: poetry run pdoc --http : aws_lambda_powertools security-baseline: diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index c8475987550..aa0293c3793 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -35,7 +35,7 @@ class MetricManager: Environment variables --------------------- - POWERTOOLS_SERVICE_NAME : str + POWERTOOLS_METRICS_NAMESPACE : str metric namespace to be set for all metrics Raises @@ -53,7 +53,7 @@ class MetricManager: def __init__(self, metric_set: Dict[str, str] = None, dimension_set: Dict = None, namespace: str = None): self.metric_set = metric_set if metric_set is not None else {} self.dimension_set = dimension_set if dimension_set is not None else {} - self.namespace = namespace or os.getenv("POWERTOOLS_SERVICE_NAME") + self.namespace = namespace or os.getenv("POWERTOOLS_METRICS_NAMESPACE") self._metric_units = [unit.value for unit in MetricUnit] self._metric_unit_options = list(MetricUnit.__members__) diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index 4bb67a4c761..6f57bb680df 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ b/aws_lambda_powertools/metrics/metric.py @@ -21,7 +21,7 @@ class SingleMetric(MetricManager): Environment variables --------------------- - POWERTOOLS_SERVICE_NAME : str + POWERTOOLS_METRICS_NAMESPACE : str metric namespace Example @@ -30,7 +30,7 @@ class SingleMetric(MetricManager): from aws_lambda_powertools.metrics import SingleMetric, MetricUnit import json - metric = Single_Metric(service="ServerlessAirline") + metric = Single_Metric(namespace="ServerlessAirline") metric.add_metric(name="ColdStart", 
unit=MetricUnit.Count, value=1) metric.add_dimension(name="function_version", value=47) @@ -62,7 +62,7 @@ def add_metric(self, name: str, unit: MetricUnit, value: float): @contextmanager -def single_metric(name: str, unit: MetricUnit, value: float, service: str = None): +def single_metric(name: str, unit: MetricUnit, value: float, namespace: str = None): """Context manager to simplify creation of a single metric Example @@ -71,12 +71,12 @@ def single_metric(name: str, unit: MetricUnit, value: float, service: str = None from aws_lambda_powertools.metrics import single_metric, MetricUnit - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, service="ServerlessAirline") as metric: + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ServerlessAirline") as metric: metric.add_dimension(name="function_version", value=47) **Same as above but set namespace using environment variable** - $ export POWERTOOLS_SERVICE_NAME="ServerlessAirline" + $ export POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" from aws_lambda_powertools.metrics import single_metric, MetricUnit @@ -91,8 +91,8 @@ def single_metric(name: str, unit: MetricUnit, value: float, service: str = None `aws_lambda_powertools.helper.models.MetricUnit` value : float Metric value - service: str - Service name used as namespace + namespace: str + Namespace for metrics Yields ------- @@ -106,7 +106,7 @@ def single_metric(name: str, unit: MetricUnit, value: float, service: str = None """ metric_set = None try: - metric: SingleMetric = SingleMetric(namespace=service) + metric: SingleMetric = SingleMetric(namespace=namespace) metric.add_metric(name=name, unit=unit, value=value) yield metric logger.debug("Serializing single metric") diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 3dc3d80961f..a6da64ea85b 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,6 +1,7 @@ 
import functools import json import logging +import os from typing import Any, Callable from aws_lambda_powertools.metrics.base import MetricManager @@ -29,10 +30,9 @@ class Metrics(MetricManager): from aws_lambda_powertools.metrics import Metrics - metrics = Metrics(service="ServerlessAirline") + metrics = Metrics(namespace="ServerlessAirline", service="payment") metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) - metrics.add_dimension(name="service", value="booking") metrics.add_dimension(name="function_version", value="$LATEST") ... @@ -47,8 +47,10 @@ def do_something(): Environment variables --------------------- - POWERTOOLS_SERVICE_NAME : str + POWERTOOLS_METRICS_NAMESPACE : str metric namespace + POWERTOOLS_SERVICE_NAME : str + service name used for default dimension Parameters ---------- @@ -64,13 +66,14 @@ def do_something(): _metrics = {} _dimensions = {} - def __init__( - self, service: str = None, - ): + def __init__(self, service: str = None, namespace: str = None): self.metric_set = self._metrics self.dimension_set = self._dimensions - self.service = service - super().__init__(metric_set=self.metric_set, dimension_set=self.dimension_set, namespace=self.service) + self.service = service or os.environ.get("POWERTOOLS_SERVICE_NAME") + self.namespace = namespace + if self.service: + self.dimension_set["service"] = self.service + super().__init__(metric_set=self.metric_set, dimension_set=self.dimension_set, namespace=self.namespace) def clear_metrics(self): logger.debug("Clearing out existing metric set from memory") diff --git a/docs/content/core/metrics.mdx b/docs/content/core/metrics.mdx index 55cbd7294a8..ee396332fcb 100644 --- a/docs/content/core/metrics.mdx +++ b/docs/content/core/metrics.mdx @@ -16,7 +16,7 @@ Metrics creates custom metrics asynchronously via logging metrics to standard ou ## Initialization -Set `POWERTOOLS_SERVICE_NAME` env var as a start - Here 
is an example using AWS Serverless Application Model (SAM) +Set `POWERTOOLS_SERVICE_NAME` and `POWERTOOLS_METRICS_NAMESPACE` env vars as a start - Here is an example using AWS Serverless Application Model (SAM) ```yaml:title=template.yaml Resources: @@ -27,37 +27,39 @@ Resources: Runtime: python3.8 Environment: Variables: - POWERTOOLS_SERVICE_NAME: ServerlessAirline # highlight-line + POWERTOOLS_SERVICE_NAME: payment # highlight-line + POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline # highlight-line ``` We recommend you use your application or main service as a metric namespace. -You can explicitly set a namespace name via `service` param or via `POWERTOOLS_SERVICE_NAME` env var. This sets **namespace** key that will be used for all metrics. +You can explicitly set a namespace name via `namespace` param or via `POWERTOOLS_METRICS_NAMESPACE` env var. This sets **namespace** key that will be used for all metrics. +You can also pass a service name via `service` param or `POWERTOOLS_SERVICE_NAME` env var. This will create a dimension with the service name. ```python:title=app.py from aws_lambda_powertools.metrics import Metrics, MetricUnit -# POWERTOOLS_SERVICE_NAME defined -metrics = Metrics() # highlight-line +# POWERTOOLS_METRICS_NAMESPACE and POWERTOOLS_SERVICE_NAME defined +metrics = Metrics() # highlight-line # Explicit definition -Metrics(service="ServerlessAirline") # sets namespace to "ServerlessAirline" +Metrics(namespace="ServerlessAirline", service="orders") # creates a default dimension {"service": "orders"} under the namespace "ServerlessAirline" ``` -You can initialize Metrics anywhere in your code as many time as you need - It'll keep track of your aggregate metrics in memory. +You can initialize Metrics anywhere in your code as many times as you need - It'll keep track of your aggregate metrics in memory. ## Creating metrics -You can create metrics using `add_metric`, and set dimensions for all your aggregate metrics using `add_dimension`. 
+You can create metrics using `add_metric`, and manually create dimensions for all your aggregate metrics using `add_dimension`. ```python:title=app.py from aws_lambda_powertools.metrics import Metrics, MetricUnit -metrics = Metrics(service="ExampleService") +metrics = Metrics(namespace="ExampleApplication", service="booking") # highlight-start -metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) -metrics.add_dimension(name="service", value="booking") +metrics.add_dimension(name="environment", value="prod") +metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) # highlight-end ``` @@ -79,7 +81,7 @@ CloudWatch EMF uses the same dimensions across all your metrics. Use `single_met ```python:title=single_metric.py from aws_lambda_powertools.metrics import MetricUnit, single_metric -with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, service="ExampleService") as metric: # highlight-line +with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApplication") as metric: # highlight-line metric.add_dimension(name="function_context", value="$LATEST") ... ``` @@ -115,7 +117,7 @@ def lambda_handler(evt, ctx): ```python:title=lambda_handler_nested_middlewares.py from aws_lambda_powertools.metrics import Metrics, MetricUnit -metrics = Metrics(service="ExampleService") +metrics = Metrics(namespace="ExampleApplication", service="booking") metrics.add_metric(name="ColdStart", unit="Count", value=1) # highlight-start @@ -123,7 +125,6 @@ metrics.add_metric(name="ColdStart", unit="Count", value=1) @tracer.capture_lambda_handler # highlight-end def lambda_handler(evt, ctx): - metrics.add_dimension(name="service", value="booking") metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) ... 
``` @@ -136,9 +137,8 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add import json from aws_lambda_powertools.metrics import Metrics, MetricUnit -metrics = Metrics(service="ExampleService") +metrics = Metrics(namespace="ExampleApplication", service="booking") metrics.add_metric(name="ColdStart", unit="Count", value=1) -metrics.add_dimension(name="service", value="booking") # highlight-start your_metrics_object = metrics.serialize_metric_set() @@ -149,10 +149,11 @@ print(json.dumps(your_metrics_object)) ## Testing your code -Use `POWERTOOLS_SERVICE_NAME` env var when unit testing your code to ensure a metric namespace object is created, and your code doesn't fail validation. +Use `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` env vars when unit testing your code to ensure metric namespace and dimension objects are created, and your code doesn't fail validation. ```bash:title=pytest_metric_namespace.sh -POWERTOOLS_SERVICE_NAME="Example" python -m pytest + +POWERTOOLS_SERVICE_NAME="Example" POWERTOOLS_METRICS_NAMESPACE="Application" python -m pytest ``` -You can ignore this if you are explicitly setting namespace by passing a service name when initializing Metrics: `metrics = Metrics(service=ServiceName)`. +You can ignore this if you are explicitly setting namespace/default dimension by passing the `namespace` and `service` parameters when initializing Metrics: `metrics = Metrics(namespace=ApplicationName, service=ServiceName)`. diff --git a/docs/content/index.mdx b/docs/content/index.mdx index 3bd43003e91..9966a08deb3 100644 --- a/docs/content/index.mdx +++ b/docs/content/index.mdx @@ -36,7 +36,8 @@ _`*` Core utilities are Tracer, Logger and Metrics. 
Optional utilities may vary Environment variable | Description | Utility ------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------- -**POWERTOOLS_SERVICE_NAME** | Sets service name used for tracing namespace, metrics namespace and structured logging | all +**POWERTOOLS_SERVICE_NAME** | Sets service name used for tracing namespace, metrics dimension and structured logging | all +**POWERTOOLS_METRICS_NAMESPACE** | Sets namespace used for metrics | [Metrics](./core/metrics) **POWERTOOLS_TRACE_DISABLED** | Disables tracing | [Tracing](./core/tracer) **POWERTOOLS_TRACE_MIDDLEWARES** | Creates sub-segment for each custom middleware | [middleware_factory](./utilities/middleware_factory) **POWERTOOLS_LOGGER_LOG_EVENT** | Logs incoming event | [Logging](./core/logger) diff --git a/example/README.md b/example/README.md index 8250c6d1b65..a54ab6ba3c3 100644 --- a/example/README.md +++ b/example/README.md @@ -1,6 +1,6 @@ # Summary -This example uses both [tracing](https://github.com/awslabs/aws-lambda-powertools/tree/develop/python#tracing) and [logging](https://github.com/awslabs/aws-lambda-powertools/tree/develop/python#logging) features, includes all environment variables that can be used, and demonstrates how to explicitly disable tracing while running unit tests - That is not necessary when running within SAM CLI as it detects the local env automatically. 
+This example uses [tracer](https://awslabs.github.io/aws-lambda-powertools-python/core/tracer/), [metrics](https://awslabs.github.io/aws-lambda-powertools-python/core/metrics/), and [logger](https://awslabs.github.io/aws-lambda-powertools-python/core/logger/) features, includes all environment variables that can be used, and demonstrates how to explicitly disable tracing while running unit tests - That is not necessary when running within SAM CLI as it detects the local env automatically. **Quick commands** * **Invoke**: `sam local invoke HelloWorldFunction --event events/event.json` * **Deploy**: `sam deploy --guided` * **Unit Tests**: We recommend proceeding with the following commands in a virtual environment - **Install deps**: `pip install -r hello_world/requirements.txt && pip install -r requirements-dev.txt` - **Run tests with namespace and service set, and tracing disabled** - `POWERTOOLS_METRICS_NAMESPACE="Example" POWERTOOLS_SERVICE_NAME="Example" POWERTOOLS_TRACE_DISABLED=1 python -m pytest` - These are necessary because `app.py` initializes them in the global scope, since both Tracer and Metrics will be initialized and configured during import time. For unit tests, we could always patch and explicitly config but env vars do just fine for this example. # Example code @@ -118,7 +118,7 @@ Tests are defined in the `tests` folder in this project.
Use PIP to install the ```bash example$ pip install -r hello_world/requirements.txt example$ pip install -r requirements-dev.txt -example$ POWERTOOLS_TRACE_DISABLED=1 python -m pytest tests/ -v +example$ pytest -v ``` ## Cleanup diff --git a/example/template.yaml b/example/template.yaml index 17a02592a9d..47267d729f5 100644 --- a/example/template.yaml +++ b/example/template.yaml @@ -24,6 +24,7 @@ Resources: POWERTOOLS_TRACE_DISABLED: "false" # Explicitly disables tracing, default POWERTOOLS_LOGGER_LOG_EVENT: "false" # Logs incoming event, default POWERTOOLS_LOGGER_SAMPLE_RATE: "0" # Debug log sampling percentage, default + POWERTOOLS_METRICS_NAMESPACE: "Example" # Metric Namespace LOG_LEVEL: INFO # Log level for Logger (INFO, DEBUG, etc.), default Events: HelloWorld: diff --git a/example/tests/__init__.py b/example/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/example/tests/test_handler.py b/example/tests/test_handler.py index fd2586f4cad..ff40bff1922 100644 --- a/example/tests/test_handler.py +++ b/example/tests/test_handler.py @@ -1,9 +1,24 @@ import json +import os +import sys from dataclasses import dataclass import pytest -from hello_world import app + +@pytest.fixture() +def env_vars(monkeypatch): + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", "example_namespace") + monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "example_service") + monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "1") + + +@pytest.fixture() +def lambda_handler(env_vars): + from hello_world import app + + return app.lambda_handler + @pytest.fixture() @@ -71,8 +86,9 @@ class Context: aws_request_id: str = "5b441b59-a550-11c8-6564-f1c833cf438c" -def test_lambda_handler(apigw_event, mocker, capsys): - ret = app.lambda_handler(apigw_event, Context()) +@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7 or higher") +def test_lambda_handler(lambda_handler, apigw_event, mocker, capsys): + ret = lambda_handler(apigw_event, Context()) data 
= json.loads(ret["body"]) output = capsys.readouterr() diff --git a/poetry.lock b/poetry.lock index a7aa137484b..cf1a02b3337 100644 --- a/poetry.lock +++ b/poetry.lock @@ -241,11 +241,12 @@ toml = ["toml"] [[package]] category = "dev" -description = "Distribution utilities" -name = "distlib" +description = "A backport of the dataclasses module for Python 3.6" +marker = "python_version >= \"3.6\" and python_version < \"3.7\"" +name = "dataclasses" optional = false -python-versions = "*" -version = "0.3.0" +python-versions = ">=3.6, <3.7" +version = "0.7" [[package]] category = "main" @@ -984,8 +985,8 @@ docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] testing = ["jaraco.itertools", "func-timeout"] [metadata] -content-hash = "8b85f6f2239656df34d0c824dbcbc83390b063ef973bc794655e148ab8303463" -python-versions = "^3.8" +content-hash = "68009b53f884801ad967ab41923a811dd51d61c28979658545b1db8e7ee1588a" +python-versions = "^3.6" [metadata.files] aioboto3 = [ @@ -1103,8 +1104,9 @@ coverage = [ {file = "coverage-5.1-cp39-cp39-win_amd64.whl", hash = "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e"}, {file = "coverage-5.1.tar.gz", hash = "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"}, ] -distlib = [ - {file = "distlib-0.3.0.zip", hash = "sha256:2e166e231a26b36d6dfe35a48c4464346620f8645ed0ace01ee31822b288de21"}, +dataclasses = [ + {file = "dataclasses-0.7-py3-none-any.whl", hash = "sha256:3459118f7ede7c8bea0fe795bff7c6c2ce287d01dd226202f7c9ebc0610a7836"}, + {file = "dataclasses-0.7.tar.gz", hash = "sha256:494a6dcae3b8bcf80848eea2ef64c0cc5cd307ffc263e17cdf42f3e5420808e6"}, ] docutils = [ {file = "docutils-0.15.2-py2-none-any.whl", hash = "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827"}, diff --git a/pyproject.toml b/pyproject.toml index 40c89590eb3..5cb355d16c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ radon = "^4.1.0" xenon = "^0.7.0" flake8-bugbear = 
"^20.1.4" flake8-eradicate = "^0.3.0" -pre-commit = "^2.4.0" +dataclasses = {version = "*", python = "~3.6"} [tool.coverage.run] source = ["aws_lambda_powertools"] diff --git a/pytest.ini b/pytest.ini index 45345cbd365..800a6cf0a8d 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,3 +1,3 @@ [pytest] addopts = -ra --cov --cov-config=.coveragerc -testpaths = ./tests +testpaths = ./tests ./example/tests \ No newline at end of file diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 50aacc734e5..25024b3cfbb 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -162,14 +162,14 @@ def lambda_handler(evt, ctx): def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): - # GIVEN we use POWERTOOLS_SERVICE_NAME - monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", namespace["name"]) + # GIVEN we use POWERTOOLS_METRICS_NAMESPACE + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace["name"]) # WHEN creating a metric but don't explicitly # add a namespace with single_metric(**metric) as my_metrics: my_metrics.add_dimension(**dimension) - monkeypatch.delenv("POWERTOOLS_SERVICE_NAME") + monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE") output = json.loads(capsys.readouterr().out.strip()) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) @@ -177,10 +177,36 @@ def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): remove_timestamp(metrics=[output, expected]) # Timestamp will always be different # THEN we should add a namespace implicitly - # with the value of POWERTOOLS_SERVICE_NAME env var + # with the value of POWERTOOLS_METRICS_NAMESPACE env var assert expected["_aws"] == output["_aws"] +def test_service_env_var(monkeypatch, capsys, metric, namespace): + # GIVEN we use POWERTOOLS_SERVICE_NAME + monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service") + my_metrics = Metrics(namespace=namespace["name"]) + + # WHEN 
creating a metric but don't explicitly + # add a dimension + @my_metrics.log_metrics + def lambda_handler(evt, context): + my_metrics.add_metric(**metric) + return True + + lambda_handler({}, {}) + + monkeypatch.delenv("POWERTOOLS_SERVICE_NAME") + + output = json.loads(capsys.readouterr().out.strip()) + expected_dimension = {"name": "service", "value": "test_service"} + expected = serialize_single_metric(metric=metric, dimension=expected_dimension, namespace=namespace) + + remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + + # THEN metrics should be logged using the implicitly created "service" dimension + assert expected == output + + def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_hundred_metrics): # GIVEN Metrics is initialized and we have over a hundred metrics to add my_metrics = Metrics() @@ -243,8 +269,8 @@ def test_incorrect_metric_unit(metric, dimension, namespace): def test_schema_no_namespace(metric, dimension): - # GIVEN we don't add any metric or dimension - # but a namespace + # GIVEN we add any metric or dimension + # but no namespace # WHEN we attempt to serialize a valid EMF object # THEN it should fail validation and raise SchemaValidationError @@ -421,9 +447,9 @@ def test_add_namespace_warns_for_deprecation(capsys, metrics, dimensions, namesp my_metrics.add_namespace(**namespace) -def test_log_metrics_with_explicit_service(capsys, metrics, dimensions): +def test_log_metrics_with_explicit_namespace(capsys, metrics, dimensions, namespace): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service") + my_metrics = Metrics(service="test_service", namespace=namespace["name"]) for metric in metrics: my_metrics.add_metric(**metric) for dimension in dimensions: @@ -438,18 +464,76 @@ def lambda_handler(evt, ctx): lambda_handler({}, {}) output = json.loads(capsys.readouterr().out.strip()) - expected = serialize_metrics(metrics=metrics, 
dimensions=dimensions, namespace={"name": "test_service"}) + + dimensions.insert(0, {"name": "service", "value": "test_service"}) + expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace) remove_timestamp(metrics=[output, expected]) # Timestamp will always be different # THEN we should have no exceptions and the namespace should be set to the name provided in the # service passed to Metrics constructor - assert expected["_aws"] == output["_aws"] + assert expected == output -def test_log_metrics_with_namespace_overridden(capsys, metrics, dimensions): +def test_log_metrics_with_implicit_dimensions(capsys, metrics): + # GIVEN Metrics is initialized with service specified + my_metrics = Metrics(service="test_service", namespace="test_application") + for metric in metrics: + my_metrics.add_metric(**metric) + + # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions + @my_metrics.log_metrics + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + + output = json.loads(capsys.readouterr().out.strip()) + + expected_dimensions = [{"name": "service", "value": "test_service"}] + expected = serialize_metrics( + metrics=metrics, dimensions=expected_dimensions, namespace={"name": "test_application"} + ) + + remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + + # THEN we should have no exceptions and the dimensions should be set to the name provided in the + # service passed to Metrics constructor + assert expected == output + + +def test_log_metrics_with_renamed_service(capsys, metrics): # GIVEN Metrics is initialized with service specified - my_metrics = Metrics(service="test_service") + my_metrics = Metrics(service="test_service", namespace="test_application") + for metric in metrics: + my_metrics.add_metric(**metric) + + # WHEN we manually call add_dimension to change the value of the service dimension + my_metrics.add_dimension(name="service", 
value="another_test_service") + + @my_metrics.log_metrics + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + + output = json.loads(capsys.readouterr().out.strip()) + + expected_dimensions = [{"name": "service", "value": "test_service"}] + expected = serialize_metrics( + metrics=metrics, dimensions=expected_dimensions, namespace={"name": "test_application"} + ) + + remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + + # THEN we should have no exceptions and the dimensions should be set to the name provided in the + # add_dimension call + assert output["service"] == "another_test_service" + + +def test_log_metrics_with_namespace_overridden(capsys, metrics, dimensions): + # GIVEN Metrics is initialized with namespace specified + my_metrics = Metrics(namespace="test_service") for metric in metrics: my_metrics.add_metric(**metric) for dimension in dimensions: @@ -470,10 +554,10 @@ def lambda_handler(evt, ctx): def test_single_metric_with_service(capsys, metric, dimension): - # GIVEN we pass service parameter to single_metric + # GIVEN we pass namespace parameter to single_metric # WHEN creating a metric - with single_metric(**metric, service="test_service") as my_metrics: + with single_metric(**metric, namespace="test_service") as my_metrics: my_metrics.add_dimension(**dimension) output = json.loads(capsys.readouterr().out.strip()) @@ -485,17 +569,17 @@ def test_single_metric_with_service(capsys, metric, dimension): assert expected["_aws"] == output["_aws"] -def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension): - # GIVEN we use POWERTOOLS_SERVICE_NAME - monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service_env_var") +def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namespace): + # GIVEN we use POWERTOOLS_METRICS_NAMESPACE + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace["name"]) - # WHEN creating a metric and explicitly set a service name - with 
single_metric(**metric, service="test_service_explicit") as my_metrics: + # WHEN creating a metric and explicitly set a namespace + with single_metric(**metric, namespace=namespace["name"]) as my_metrics: my_metrics.add_dimension(**dimension) - monkeypatch.delenv("POWERTOOLS_SERVICE_NAME") + monkeypatch.delenv("POWERTOOLS_METRICS_NAMESPACE") output = json.loads(capsys.readouterr().out.strip()) - expected = serialize_single_metric(metric=metric, dimension=dimension, namespace={"name": "test_service_explicit"}) + expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) remove_timestamp(metrics=[output, expected]) # Timestamp will always be different