Skip to content

Commit

Permalink
otel-collector config to populate FAAS name (#42)
Browse files Browse the repository at this point in the history
  • Loading branch information
tcnghia authored Dec 24, 2023
1 parent 100bc25 commit 4aba832
Showing 1 changed file with 14 additions and 7 deletions.
21 changes: 14 additions & 7 deletions otel-collector/cmd/otel-collector/kodata/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ receivers:
static_configs:
# TODO: make this configurable
- targets: ["localhost:2112"]
# Do not relabel the job and instance labels if they already exist.
honor_labels: true
metric_relabel_configs:
- source_labels: [ __name__ ]
regex: '^prometheus_.*'
Expand Down Expand Up @@ -39,17 +41,22 @@ processors:

resource:
attributes:
# Add the instance ID (from `faas.id`) as the `service.instance.id` resource attribute.
- key: service.instance.id
from_attribute: faas.id
# The `gcp` resourcedetection processor sets `faas.name` to the name of the
# Cloud Run service or the Cloud Run job.
- from_attribute: faas.name
# The googlemanagedprometheus exporter consumes the `service.name` attribute
# and sets the `job` resource label to its value. (See
# https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/pull/764)
key: "service.name"
action: upsert
# Parse the service name from the `K_SERVICE` Cloud Run environment variable.
- key: service.name
value: ${env:K_SERVICE}
action: insert

exporters:
googlemanagedprometheus:
sending_queue:
enabled: true
# we are handling metrics for a single pod, no need to have
# too many senders. this will also avoid out-of-order data.
num_consumers: 1

extensions:
health_check:
Expand Down

0 comments on commit 4aba832

Please sign in to comment.