Agent integrations module
Signed-off-by: Weifeng Wang <[email protected]>

agent set clustering = true

Signed-off-by: Weifeng Wang <[email protected]>
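The "clustering = true" note refers to Agent Flow clustering (docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/), where scraping components distribute their discovered targets across all agent instances in the cluster. A minimal sketch of a scrape component opting in; the component and receiver names here are illustrative, not taken from this commit:

    prometheus.scrape "example" {
      targets    = discovery.docker.dd_metrics.targets
      forward_to = [prometheus.remote_write.default.receiver]

      // share the discovered targets across clustered agent instances
      clustering {
        enabled = true
      }
    }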
qclaogui committed Mar 18, 2024
1 parent 91d198b commit 87f18ff
Showing 12 changed files with 147 additions and 130 deletions.
1 change: 0 additions & 1 deletion docker-compose/common/config/agent-flow/metrics.river
@@ -26,6 +26,5 @@ module.file "metrics_primary" {

   arguments {
     forward_to = [module.file.lgtmp_provider_local.exports.metrics_receiver]
-    clustering = true
   }
 }
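Dropping the argument is not a loss of function: after this commit, clustering is switched on unconditionally inside the modules themselves, as the module diffs below show with a hardcoded block of the form:

    clustering {
      enabled = true
    }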
@@ -35,10 +35,22 @@ argument "keep_labels" {
   ]
 }
 
+// get all available containers.
+discovery.docker "dd_logs" {
+  host             = "unix:///var/run/docker.sock"
+  refresh_interval = "15s"
+
+  filter {
+    name   = "status"
+    values = ["running"]
+  }
+}
+
 module.file "mf_logs_auto_scrape" {
   filename = env("AGENT_CONFIG_FOLDER") + "/modules/docker/logs/logs-auto-scrape.river"
 
   arguments {
+    targets    = discovery.docker.dd_logs.targets
     forward_to = [module.file.mf_log_formats_all.exports.process.receiver]
     tenant     = argument.tenant.value
   }
@@ -21,6 +21,10 @@ argument "forward_to" {
   comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
 }
 
+argument "targets" {
+  comment = "The running containers."
+}
+
 argument "cluster" {
   optional = true
 }
@@ -34,23 +38,6 @@ argument "tenant" {
   optional = true
 }
 
-argument "clustering" {
-  // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
-  comment  = "Whether or not clustering should be enabled (default: false)"
-  optional = true
-}
-
-// get the available containers.
-discovery.docker "dd_logs" {
-  host             = "unix:///var/run/docker.sock"
-  refresh_interval = "15s"
-
-  filter {
-    name   = "status"
-    values = ["running"]
-  }
-}
-
 // get logs from discovery relabel dr_docker_logs below
 loki.source.docker "lsd_docker_logs" {
   forward_to = argument.forward_to.value
@@ -62,7 +49,7 @@ loki.source.docker "lsd_docker_logs" {
 }
 
 discovery.relabel "dr_docker_logs" {
-  targets = discovery.docker.dd_logs.targets
+  targets = argument.targets.value
 
   /****************************************************************************************************************
    * Handle Discovers From Docker Engine Containers Targets to Keep or Drop
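Taken together, the logs changes move container discovery out of the module and into its caller: the wrapper runs discovery.docker once and hands the result to the module through the new "targets" argument. A condensed sketch of the resulting wiring; the loki.write receiver is an assumption for illustration only:

    // wrapper: discover running containers once
    discovery.docker "dd_logs" {
      host             = "unix:///var/run/docker.sock"
      refresh_interval = "15s"

      filter {
        name   = "status"
        values = ["running"]
      }
    }

    module.file "mf_logs_auto_scrape" {
      filename = env("AGENT_CONFIG_FOLDER") + "/modules/docker/logs/logs-auto-scrape.river"

      arguments {
        targets    = discovery.docker.dd_logs.targets
        forward_to = [loki.write.default.receiver] // illustrative receiver
      }
    }

    // module side: consume the shared targets instead of discovering privately
    // discovery.relabel "dr_docker_logs" { targets = argument.targets.value ... }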
@@ -11,66 +11,43 @@ argument "tenant" {
   optional = true
 }
 
-argument "clustering" {
-  // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
-  comment = "Whether or not clustering should be enabled (default: true)"
+// get all available containers.
+discovery.docker "dd_metrics" {
+  host             = "unix:///var/run/docker.sock"
+  refresh_interval = "15s"
+
+  filter {
+    name   = "status"
+    values = ["running"]
+  }
 }
 
-module.file "mf_metrics_auto_scrape" {
-  filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/metrics-auto-scrape.river"
+module.file "mf_label_auto_scrape" {
+  filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/label-auto-scrape.river"
 
   arguments {
-    forward_to = argument.forward_to.value
-    tenant     = coalesce(argument.tenant.value, ".*")
-    clustering = coalesce(argument.clustering.value, "true")
+    targets         = discovery.docker.dd_metrics.targets
+    forward_to      = argument.forward_to.value
+    tenant          = coalesce(argument.tenant.value, ".*")
+    scrape_interval = "15s"
   }
 }
 
 module.file "mf_job_minio_scrape" {
   filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/jobs/minio.river"
 
   arguments {
+    targets         = discovery.docker.dd_metrics.targets
     forward_to      = argument.forward_to.value
     scrape_interval = "15s"
   }
 }
 
-prometheus.exporter.unix "peu_containers" {
-  set_collectors     = ["cpu"]
-  disable_collectors = ["diskstats", "mdadm", "textfile", "hwmon"]
-}
-
-prometheus.scrape "pc_integrations" {
-  forward_to = [prometheus.relabel.pr_integrations.receiver]
+module.file "mf_job_integration_scrape" {
+  filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/jobs/integrations.river"
 
-  targets = concat(
-    prometheus.exporter.unix.peu_containers.targets,
-  )
-
-  enable_protobuf_negotiation = true
-  scrape_classic_histograms   = true
-
-  scrape_interval = "15s"
-
-  clustering {
-    enabled = coalesce(argument.clustering.value, "true")
-  }
-}
-
-prometheus.relabel "pr_integrations" {
-  forward_to = argument.forward_to.value
-
-  rule {
-    source_labels = ["job"]
-    regex         = "integrations/(.*)"
-    target_label  = "pod"
-    replacement   = "${2}"
-  }
-
-  rule {
-    source_labels = ["job"]
-    regex         = "integrations/(.*)"
-    target_label  = "container"
-    replacement   = "${2}"
+  arguments {
+    forward_to      = argument.forward_to.value
+    scrape_interval = "15s"
   }
 }
@@ -0,0 +1,67 @@
+/*
+Module: integration-job
+Description: Wrapper module to include all Docker integration metric modules
+*/
+argument "forward_to" {
+  comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
+}
+
+argument "scrape_interval" {
+  comment  = "How often to scrape metrics from the targets (default: 60s)"
+  optional = true
+}
+
+argument "scrape_timeout" {
+  comment  = "How long before a scrape times out (default: 10s)"
+  optional = true
+}
+
+/********************************************
+ * Integrations Node Exporter
+ ********************************************/
+prometheus.exporter.unix "peu_unix" {
+  set_collectors     = ["cpu"]
+  disable_collectors = ["diskstats", "mdadm", "textfile", "hwmon"]
+}
+
+/********************************************
+ * Prometheus Scrape Integrations Targets
+ ********************************************/
+prometheus.scrape "ps_integrations" {
+  targets = concat(
+    prometheus.exporter.unix.peu_unix.targets,
+  )
+
+  enable_protobuf_negotiation = true
+  scrape_classic_histograms   = true
+
+  scrape_interval = coalesce(argument.scrape_interval.value, "60s")
+  scrape_timeout  = coalesce(argument.scrape_timeout.value, "10s")
+
+  clustering {
+    enabled = true
+  }
+
+  forward_to = [prometheus.relabel.pr_integrations.receiver]
+}
+
+/********************************************
+ * Prometheus Metric Relabelings (post-scrape)
+ ********************************************/
+prometheus.relabel "pr_integrations" {
+  forward_to = argument.forward_to.value
+
+  rule {
+    source_labels = ["job"]
+    regex         = "integrations/(.*)"
+    target_label  = "pod"
+    replacement   = "${1}"
+  }
+
+  rule {
+    source_labels = ["job"]
+    regex         = "integrations/(.*)"
+    target_label  = "container"
+    replacement   = "${1}"
+  }
+}
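For reference, the wrapper diff above consumes this new module with a plain module.file invocation; shown here in isolation, with an illustrative receiver in place of the wrapper's forward_to argument:

    module.file "mf_job_integration_scrape" {
      filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/jobs/integrations.river"

      arguments {
        forward_to      = [prometheus.remote_write.default.receiver] // illustrative receiver
        scrape_interval = "15s"
      }
    }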
@@ -50,11 +50,15 @@ These present metrics about the whole MinIO cluster.

 */
 argument "forward_to" {
-  comment = "Must be a list(MetricsReceiver) where collected logs should be forwarded to"
+  comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
 }
 
+argument "targets" {
+  comment = "The running containers."
+}
+
 argument "enabled" {
-  comment  = "Whether or not the minio-job should be enabled (default: true)"
+  comment  = "Whether or not the job should be enabled (default: true)"
   optional = true
 }
 
@@ -83,21 +87,11 @@ argument "scrape_timeout" {
   optional = true
 }
 
-// minio service discovery for all of the containers in docker.
-discovery.docker "dd_minio" {
-  host = "unix:///var/run/docker.sock"
-
-  filter {
-    name   = "status"
-    values = ["running"]
-  }
-}
-
 /********************************************
- * Minio Relabelings (pre-scrape)
+ * Discovery Relabelings (pre-scrape)
  ********************************************/
 discovery.relabel "dr_minio" {
-  targets = discovery.docker.dd_minio.targets
+  targets = argument.targets.value
 
   // drop all targets if enabled is false
   rule {
@@ -261,7 +255,7 @@ discovery.relabel "dr_metrics_v3_cluster_erasure_set" {
 }
 
 /********************************************
- * Minio Scrape Jobs
+ * Prometheus Scrape Jobs Targets
  ********************************************/
 prometheus.scrape "ps_minio" {
   targets = concat(
@@ -292,7 +286,7 @@ prometheus.scrape "ps_minio" {
 }
 
 /********************************************
- * Minio Metric Relabelings (pre-scrape)
+ * Prometheus Metric Relabelings (post-scrape)
  ********************************************/
 prometheus.relabel "pr_minio" {
   forward_to = argument.forward_to.value
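Since "enabled" is optional and defaults to true, a caller can switch the whole MinIO job off without editing the module. A sketch of such an invocation; whether the wrapper actually exposes this flag is an assumption:

    arguments {
      targets    = discovery.docker.dd_metrics.targets
      forward_to = argument.forward_to.value
      enabled    = false // the dr_minio relabel rule then drops every target
    }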
@@ -57,6 +57,10 @@ argument "forward_to" {
   comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
 }
 
+argument "targets" {
+  comment = "The running containers."
+}
+
 argument "cluster" {
   optional = true
 }
@@ -81,7 +85,7 @@ argument "drop_metrics" {
 }
 
 argument "scrape_interval" {
-  comment  = "How often to scrape metrics from the targets (default: 15s)"
+  comment  = "How often to scrape metrics from the targets (default: 60s)"
   optional = true
 }
 
@@ -90,25 +94,11 @@ argument "scrape_timeout" {
   optional = true
 }
 
-argument "clustering" {
-  // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
-  comment  = "Whether or not clustering should be enabled (default: false)"
-  optional = true
-}
-
-// get the available containers.
-discovery.docker "dd_metrics" {
-  host             = "unix:///var/run/docker.sock"
-  refresh_interval = "15s"
-
-  filter {
-    name   = "status"
-    values = ["running"]
-  }
-}
-
-discovery.relabel "dr_docker_metrics" {
-  targets = discovery.docker.dd_metrics.targets
+/********************************************
+ * Discovery Relabelings (pre-scrape)
+ ********************************************/
+discovery.relabel "dr_label_metrics" {
+  targets = argument.targets.value
 
   /****************************************************************************************************************
    * Handle Discovers From Docker Engine Containers Targets to Keep or Drop
@@ -224,7 +214,7 @@ discovery.relabel "dr_docker_metrics" {
   // metrics.agent.grafana.com/interval: 15s
   rule {
     action       = "replace"
-    replacement  = coalesce(argument.scrape_interval.value, "15s")
+    replacement  = coalesce(argument.scrape_interval.value, "60s")
     target_label = "__scrape_interval__"
   }
 
@@ -345,7 +335,7 @@ discovery.relabel "dr_docker_metrics" {

 // only keep http targets
 discovery.relabel "dr_keep_http_targets" {
-  targets = discovery.relabel.dr_docker_metrics.output
+  targets = discovery.relabel.dr_label_metrics.output
 
   rule {
     action = "keep"
@@ -354,24 +344,31 @@ discovery.relabel "dr_keep_http_targets" {
   }
 }
 
-// scrape http only targtets
+/********************************************
+ * Prometheus Scrape Labels Targets
+ ********************************************/
 prometheus.scrape "pc_docker_metrics" {
-  forward_to = [prometheus.relabel.pr_docker_metrics.receiver]
-  targets = concat(
-    discovery.relabel.dr_keep_http_targets.output,
-  )
 
-  job_name        = "label-metrics-http"
+  targets         = discovery.relabel.dr_keep_http_targets.output
   scheme          = "http"
-  scrape_interval = coalesce(argument.scrape_interval.value, "1m")
+  job_name        = "label-metrics"
+  scrape_interval = coalesce(argument.scrape_interval.value, "60s")
   scrape_timeout  = coalesce(argument.scrape_timeout.value, "10s")
 
   enable_protobuf_negotiation = true
   scrape_classic_histograms   = true
 
   clustering {
-    enabled = coalesce(argument.clustering.value, false)
+    enabled = true
   }
 
+  forward_to = [prometheus.relabel.pr_docker_metrics.receiver]
 }
 
+/********************************************
+ * Prometheus Metric Relabelings (post-scrape)
+ ********************************************/
 // perform generic relabeling using keep_metrics and drop_metrics
 prometheus.relabel "pr_docker_metrics" {
   forward_to = argument.forward_to.value
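The relabel rules in this module are driven by per-container annotations such as metrics.agent.grafana.com/interval (visible in the context above). Docker service discovery exposes each container label as __meta_docker_container_label_<name> with punctuation mapped to underscores, so a per-container override rule plausibly takes this shape; this is a sketch of the pattern, not the file's exact rule:

    rule {
      action        = "replace"
      source_labels = ["__meta_docker_container_label_metrics_agent_grafana_com_interval"]
      regex         = "(.+)"
      target_label  = "__scrape_interval__"
    }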