Agent integrations module
Signed-off-by: Weifeng Wang <[email protected]>

agent set clustering = true

Signed-off-by: Weifeng Wang <[email protected]>
qclaogui committed Mar 17, 2024
1 parent 91d198b commit 500358b
Showing 11 changed files with 93 additions and 91 deletions.
1 change: 0 additions & 1 deletion docker-compose/common/config/agent-flow/metrics.river
@@ -26,6 +26,5 @@ module.file "metrics_primary" {

arguments {
forward_to = [module.file.lgtmp_provider_local.exports.metrics_receiver]
- clustering = true
}
}
67 changes: 67 additions & 0 deletions docker-compose/common/config/agent-flow/modules/docker/integrations/all.river
@@ -0,0 +1,67 @@
/*
Module: integrations-all
Description: Wrapper module to include all metrics integration modules
*/
argument "forward_to" {
comment = "Must be a list(MetricssReceiver) where collected metrics should be forwarded to"
}

argument "scrape_interval" {
comment = "How often to scrape metrics from the targets (default: 60s)"
optional = true
}

argument "scrape_timeout" {
comment = "How long before a scrape times out (default: 10s)"
optional = true
}

prometheus.exporter.unix "peu_unix" {
set_collectors = ["cpu"]
disable_collectors = ["diskstats", "mdadm", "textfile", "hwmon"]
}

prometheus.exporter.memcached "pem_memcached" {
address = "memcached:11211"
timeout = "5s"
}

/********************************************
* Prometheus Scrape Job
********************************************/
prometheus.scrape "ps_integrations" {
targets = concat(
prometheus.exporter.unix.peu_unix.targets,
//prometheus.exporter.memcached.pem_memcached.targets,
)

enable_protobuf_negotiation = true
scrape_classic_histograms = true

scrape_interval = coalesce(argument.scrape_interval.value, "60s")
scrape_timeout = coalesce(argument.scrape_timeout.value, "10s")

clustering {
enabled = true
}

forward_to = [prometheus.relabel.pr_integrations.receiver]
}
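Note that the memcached exporter is defined above but its targets stay commented out of the scrape job. A minimal sketch of the concat with memcached scraping enabled, using the same component names as in this file:

targets = concat(
prometheus.exporter.unix.peu_unix.targets,
prometheus.exporter.memcached.pem_memcached.targets,
)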

prometheus.relabel "pr_integrations" {
forward_to = argument.forward_to.value

rule {
source_labels = ["job"]
regex = "integrations/(.*)"
target_label = "pod"
replacement = "${2}"
}

rule {
source_labels = ["job"]
regex = "integrations/(.*)"
target_label = "container"
replacement = "${2}"
}
}
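With a single capture group in the regex, ${1} holds the integration name taken from the job label. A hypothetical before/after for a series whose job label is set to "integrations/unix":

// before relabeling: {job="integrations/unix", ...}
// after relabeling:  {job="integrations/unix", pod="unix", container="unix", ...}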
@@ -34,12 +34,6 @@ argument "tenant" {
optional = true
}

argument "clustering" {
// Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
comment = "Whether or not clustering should be enabled (default: false)"
optional = true
}

// get the available containers.
discovery.docker "dd_logs" {
host = "unix:///var/run/docker.sock"
@@ -11,18 +11,13 @@ argument "tenant" {
optional = true
}

argument "clustering" {
// Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
comment = "Whether or not clustering should be enabled (default: true)"
}

module.file "mf_metrics_auto_scrape" {
filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/metrics-auto-scrape.river"
module.file "mf_label_auto_scrape" {
filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/metrics/label-auto-scrape.river"

arguments {
- forward_to = argument.forward_to.value
- tenant = coalesce(argument.tenant.value, ".*")
- clustering = coalesce(argument.clustering.value, "true")
+ forward_to = argument.forward_to.value
+ tenant = coalesce(argument.tenant.value, ".*")
+ scrape_interval = "15s"
}
}

@@ -35,42 +30,11 @@ module.file "mf_job_minio_scrape" {
}
}

prometheus.exporter.unix "peu_containers" {
set_collectors = ["cpu"]
disable_collectors = ["diskstats", "mdadm", "textfile", "hwmon"]
}

prometheus.scrape "pc_integrations" {
forward_to = [prometheus.relabel.pr_integrations.receiver]

targets = concat(
prometheus.exporter.unix.peu_containers.targets,
)

enable_protobuf_negotiation = true
scrape_classic_histograms = true

scrape_interval = "15s"

clustering {
enabled = coalesce(argument.clustering.value, "true")
}
}

prometheus.relabel "pr_integrations" {
forward_to = argument.forward_to.value
module.file "mf_integration_scrape" {
filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/integrations/all.river"

- rule {
- source_labels = ["job"]
- regex = "integrations/(.*)"
- target_label = "pod"
- replacement = "${2}"
- }
-
- rule {
- source_labels = ["job"]
- regex = "integrations/(.*)"
- target_label = "container"
- replacement = "${2}"
+ arguments {
+ forward_to = argument.forward_to.value
+ scrape_interval = "15s"
}
}
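After this change the integrations scrape lives in its own module rather than inline in the metrics wrapper. A rough sketch of the resulting module graph, assuming metrics_primary points at modules/docker/metrics/all.river (the minio job's filename is not shown in this diff):

metrics.river
└─ module.file "metrics_primary"           → modules/docker/metrics/all.river (assumed)
   ├─ module.file "mf_label_auto_scrape"   → modules/docker/metrics/label-auto-scrape.river
   ├─ module.file "mf_job_minio_scrape"    → (filename not shown in this diff)
   └─ module.file "mf_integration_scrape"  → modules/docker/integrations/all.river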
@@ -50,11 +50,11 @@ These present metrics about the whole MinIO cluster.

*/
argument "forward_to" {
comment = "Must be a list(MetricsReceiver) where collected logs should be forwarded to"
comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
}

argument "enabled" {
comment = "Whether or not the minio-job should be enabled (default: true)"
comment = "Whether or not the job should be enabled (default: true)"
optional = true
}

@@ -83,7 +83,7 @@ argument "scrape_timeout" {
optional = true
}

- // minio service discovery for all of the containers in docker.
+ // service discovery for all of the containers in docker.
discovery.docker "dd_minio" {
host = "unix:///var/run/docker.sock"

@@ -94,7 +94,7 @@ discovery.docker "dd_minio" {
}

/********************************************
- * Minio Relabelings (pre-scrape)
+ * Discovery Relabelings (pre-scrape)
********************************************/
discovery.relabel "dr_minio" {
targets = discovery.docker.dd_minio.targets
@@ -261,7 +261,7 @@ discovery.relabel "dr_metrics_v3_cluster_erasure_set" {
}

/********************************************
- * Minio Scrape Jobs
+ * Prometheus Scrape Jobs
********************************************/
prometheus.scrape "ps_minio" {
targets = concat(
@@ -292,7 +292,7 @@ prometheus.scrape "ps_minio" {
}

/********************************************
- * Minio Metric Relabelings (pre-scrape)
+ * Prometheus Metric Relabelings (post-scrape)
********************************************/
prometheus.relabel "pr_minio" {
forward_to = argument.forward_to.value
@@ -81,7 +81,7 @@ argument "drop_metrics" {
}

argument "scrape_interval" {
comment = "How often to scrape metrics from the targets (default: 15s)"
comment = "How often to scrape metrics from the targets (default: 60s)"
optional = true
}

@@ -90,12 +90,6 @@ argument "scrape_timeout" {
optional = true
}

argument "clustering" {
// Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
comment = "Whether or not clustering should be enabled (default: false)"
optional = true
}

// get the available containers.
discovery.docker "dd_metrics" {
host = "unix:///var/run/docker.sock"
@@ -224,7 +218,7 @@ discovery.relabel "dr_docker_metrics" {
// metrics.agent.grafana.com/interval: 15s
rule {
action = "replace"
replacement = coalesce(argument.scrape_interval.value, "15s")
replacement = coalesce(argument.scrape_interval.value, "60s")
target_label = "__scrape_interval__"
}

@@ -361,14 +355,14 @@ prometheus.scrape "pc_docker_metrics" {
job_name = "label-metrics-http"
targets = discovery.relabel.dr_keep_http_targets.output
scheme = "http"
scrape_interval = coalesce(argument.scrape_interval.value, "1m")
scrape_interval = coalesce(argument.scrape_interval.value, "60s")
scrape_timeout = coalesce(argument.scrape_timeout.value, "10s")

enable_protobuf_negotiation = true
scrape_classic_histograms = true

clustering {
- enabled = coalesce(argument.clustering.value, false)
+ enabled = true
}
}

@@ -11,17 +11,11 @@ argument "tenant" {
optional = true
}

argument "clustering" {
// Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
comment = "Whether or not clustering should be enabled (default: true)"
}

module.file "mf_profiles_auto_scrape" {
filename = coalesce(env("AGENT_CONFIG_FOLDER"), "/etc/agent-config") + "/modules/docker/profiles/profiles-auto-scrape.river"

arguments {
forward_to = argument.forward_to.value
tenant = coalesce(argument.tenant.value, ".*")
clustering = coalesce(argument.clustering.value, "true")
}
}
@@ -59,12 +59,6 @@ argument "scrape_timeout" {
optional = true
}

argument "clustering" {
// Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
comment = "Whether or not clustering should be enabled (default: false)"
optional = true
}

// get the available containers.
discovery.docker "dd_profiles" {
host = "unix:///var/run/docker.sock"
@@ -296,7 +290,7 @@ pyroscope.scrape "ps_profile_cpu" {
targets = discovery.relabel.dr_keep_cpu_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -361,7 +355,7 @@ pyroscope.scrape "ps_profile_memory" {
targets = discovery.relabel.dr_keep_memory_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -426,7 +420,7 @@ pyroscope.scrape "ps_profile_goroutine" {
targets = discovery.relabel.dr_keep_goroutine_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -491,7 +485,7 @@ pyroscope.scrape "ps_profile_block" {
targets = discovery.relabel.dr_keep_block_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -556,7 +550,7 @@ pyroscope.scrape "ps_profile_mutex" {
targets = discovery.relabel.dr_keep_mutex_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -621,7 +615,7 @@ pyroscope.scrape "ps_profile_fgprof" {
targets = discovery.relabel.dr_keep_fgprof_targets.output

clustering {
- enabled = coalesce(argument.clustering.value, true)
+ enabled = true
}

profiling_config {
@@ -33,7 +33,6 @@ module.file "metrics_primary" {

arguments {
forward_to = [module.file.lgtmp_provider_local.exports.metrics_receiver]
- clustering = true
}
}

@@ -79,6 +78,5 @@ module.file "profiles_primary" {

arguments {
forward_to = [module.file.lgtmp_provider_local.exports.profiles_receiver]
- clustering = true
}
}
1 change: 0 additions & 1 deletion docker-compose/common/config/agent-flow/profiles.river
@@ -26,6 +26,5 @@ module.file "profiles_primary" {

arguments {
forward_to = [module.file.lgtmp_provider_local.exports.profiles_receiver]
- clustering = true
}
}
1 change: 0 additions & 1 deletion docker-compose/common/config/agent-flow/traces.river
@@ -27,7 +27,6 @@ module.file "metrics_primary" {

arguments {
forward_to = [module.file.lgtmp_provider_local.exports.metrics_receiver]
- clustering = true
}
}

Expand Down
