feat: add capture_cold_start_metric for log_metrics #67

Merged
merged 2 commits · Jun 8, 2020
31 changes: 28 additions & 3 deletions aws_lambda_powertools/metrics/metrics.py
@@ -8,6 +8,8 @@

logger = logging.getLogger(__name__)

is_cold_start = True


class Metrics(MetricManager):
"""Metrics create an EMF object with up to 100 metrics
@@ -80,7 +82,7 @@ def clear_metrics(self):
self.metric_set.clear()
self.dimension_set.clear()

def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None):
def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
"""Decorator to serialize and publish metrics at the end of a function execution.

Be aware that log_metrics **does call** the decorated function (e.g. lambda_handler).
@@ -107,10 +109,18 @@ def handler(event, context)
Propagate error received
"""

# If handler is None we've been called with parameters
# Return a partial function with args filled
if lambda_handler is None:
logger.debug("Decorator called with parameters")
return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)

@functools.wraps(lambda_handler)
def decorate(*args, **kwargs):
def decorate(event, context):
try:
response = lambda_handler(*args, **kwargs)
response = lambda_handler(event, context)
if capture_cold_start_metric:
self.__add_cold_start_metric(context=context)
finally:
metrics = self.serialize_metric_set()
self.clear_metrics()
@@ -120,3 +130,18 @@ def decorate(*args, **kwargs):
return response

return decorate

def __add_cold_start_metric(self, context: Any):
"""Add cold start metric and function_name dimension

Parameters
----------
context : Any
Lambda context
"""
global is_cold_start
if is_cold_start:
logger.debug("Adding cold start metric and function_name dimension")
self.add_metric(name="ColdStart", value=1, unit="Count")
self.add_dimension(name="function_name", value=context.function_name)
is_cold_start = False
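
The once-per-environment behaviour above relies on module-level state: globals persist across warm invocations of the same Lambda execution environment. A minimal standalone sketch of that pattern (hypothetical names, not part of this diff):

```python
# Module-level flag: survives across warm invocations of the same execution
# environment, so this branch runs only on the first call per container.
_cold_start = True  # hypothetical name; the PR uses a module-level `is_cold_start`


def handler(event, context):
    global _cold_start
    if _cold_start:
        _cold_start = False
        # Stand-in for emitting the ColdStart metric and function_name dimension
        print("first invocation in this execution environment")
    return "ok"
```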
16 changes: 16 additions & 0 deletions docs/content/core/metrics.mdx
@@ -147,6 +147,22 @@ print(json.dumps(your_metrics_object))
# highlight-end
```

## Capturing cold start metric

You can capture cold start metrics automatically with `log_metrics` via the `capture_cold_start_metric` parameter.

```python:title=lambda_handler.py
from aws_lambda_powertools.metrics import Metrics, MetricUnit

metrics = Metrics(service="ExampleService")

@metrics.log_metrics(capture_cold_start_metric=True) # highlight-line
def lambda_handler(evt, ctx):
...
```

On a cold start, this feature adds a metric named `ColdStart` and a dimension named `function_name`.
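
For reference, on a cold start the decorator behaves roughly like making these two calls yourself (names taken from the diff above; `metrics` and `context` are assumed to be in scope):

```python
# Roughly equivalent to what capture_cold_start_metric does, on the first invocation only
metrics.add_metric(name="ColdStart", value=1, unit="Count")
metrics.add_dimension(name="function_name", value=context.function_name)
```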

## Testing your code

Use `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` env vars when unit testing your code to ensure metric namespace and dimension objects are created, and your code doesn't fail validation.
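
A minimal sketch of how that might look with pytest's `monkeypatch` and `capsys` (fixture usage and names are illustrative, not part of this PR; exact behaviour depends on your Powertools version):

```python
import json


def test_handler_emits_metrics(monkeypatch, capsys):
    # Set namespace/service before constructing Metrics so validation passes
    monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", "ExampleNamespace")
    monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "ExampleService")

    from aws_lambda_powertools.metrics import Metrics, MetricUnit

    metrics = Metrics()

    @metrics.log_metrics
    def handler(event, context):
        metrics.add_metric(name="SuccessfulInvocation", unit=MetricUnit.Count, value=1)

    handler({}, None)

    # log_metrics serializes and prints the EMF blob at the end of the invocation
    output = json.loads(capsys.readouterr().out.strip())
    assert "SuccessfulInvocation" in output
```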
48 changes: 48 additions & 0 deletions tests/functional/test_metrics.py
@@ -1,4 +1,5 @@
import json
from collections import namedtuple
from typing import Any, Dict, List

import pytest
@@ -585,3 +586,50 @@ def test_namespace_var_precedence(monkeypatch, capsys, metric, dimension, namespace):

# THEN namespace should match the explicitly passed variable and not the env var
assert expected["_aws"] == output["_aws"]


def test_emit_cold_start_metric(capsys, namespace):
# GIVEN Metrics is initialized
my_metrics = Metrics()
my_metrics.add_namespace(**namespace)

# WHEN log_metrics is used with capture_cold_start_metric
@my_metrics.log_metrics(capture_cold_start_metric=True)
def lambda_handler(evt, context):
return True

LambdaContext = namedtuple("LambdaContext", "function_name")
lambda_handler({}, LambdaContext("example_fn"))

output = json.loads(capsys.readouterr().out.strip())

# THEN ColdStart metric and function_name dimension should be logged
assert output["ColdStart"] == 1
assert output["function_name"] == "example_fn"


def test_emit_cold_start_metric_only_once(capsys, namespace, dimension, metric):
# GIVEN Metrics is initialized
my_metrics = Metrics()
my_metrics.add_namespace(**namespace)

# WHEN log_metrics is used with capture_cold_start_metric
# and handler is called more than once
@my_metrics.log_metrics(capture_cold_start_metric=True)
def lambda_handler(evt, context):
my_metrics.add_metric(**metric)
my_metrics.add_dimension(**dimension)

LambdaContext = namedtuple("LambdaContext", "function_name")
lambda_handler({}, LambdaContext("example_fn"))
capsys.readouterr().out.strip()

# THEN ColdStart metric and function_name dimension should be logged
# only once
lambda_handler({}, LambdaContext("example_fn"))

output = json.loads(capsys.readouterr().out.strip())

assert "ColdStart" not in output

assert "function_name" not in output