Remove redundant prometheus/alertmanager rules
With the alerts provided by the newly added prometheus-exporter mixin,
some of the existing alerts have become redundant and are removed.

Signed-off-by: Jan Horstmann <[email protected]>
janhorstmann committed Nov 8, 2024
1 parent 5abedaf commit 299b20e
Showing 1 changed file with 1 addition and 100 deletions.
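
For context: the removals below assume the newly imported mixin ships equivalent alerts. As an illustrative sketch only (not taken from this repository; the rule name, window, and threshold follow the upstream prometheus-mixin and may differ in the mixin actually added here), the deleted PrometheusRuleEvaluationFailures rule is typically superseded by something like:

groups:
  - name: prometheus
    rules:
      - alert: PrometheusRuleFailures
        # Fires when rule evaluation keeps failing; covers the same condition
        # as the hand-written PrometheusRuleEvaluationFailures rule deleted below.
        expr: increase(prometheus_rule_evaluation_failures_total[5m]) > 0
        for: 15m
        labels:
          severity: critical
        annotations:
          summary: Prometheus is failing rule evaluations (instance {{ $labels.instance }})
          description: "Prometheus {{ $labels.instance }} has failed to evaluate {{ $value }} rules in the last 5m."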
prometheus/prometheus-extra.rules
@@ -40,17 +40,8 @@ groups:
           summary: Prometheus target missing with warmup time (instance {{ $labels.instance }})
           description: "Allow a job time to start up (10 minutes) before alerting that it's down.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusConfigurationReloadFailure
-        expr: 'prometheus_config_last_reload_successful != 1'
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
-          description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTooManyRestarts
-        expr: 'changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2'
+        expr: 'changes(process_start_time_seconds{job=~"prometheus|pushgateway"}[15m]) > 2'
         for: 0m
         labels:
           severity: warning
@@ -67,24 +58,6 @@ groups:
           summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
           description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusAlertmanagerConfigurationReloadFailure
-        expr: 'alertmanager_config_last_reload_successful != 1'
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
-          description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-      - alert: PrometheusAlertmanagerConfigNotSynced
-        expr: 'count(count_values("config_hash", alertmanager_config_hash)) > 1'
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
-          description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusAlertmanagerE2eDeadManSwitch
         expr: 'vector(1)'
         for: 0m
@@ -94,24 +67,6 @@ groups:
           summary: Prometheus AlertManager E2E dead man switch (instance {{ $labels.instance }})
           description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusNotConnectedToAlertmanager
-        expr: 'prometheus_notifications_alertmanagers_discovered < 1'
-        for: 0m
-        labels:
-          severity: critical
-        annotations:
-          summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
-          description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-      - alert: PrometheusRuleEvaluationFailures
-        expr: 'increase(prometheus_rule_evaluation_failures_total[3m]) > 0'
-        for: 0m
-        labels:
-          severity: critical
-        annotations:
-          summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
-          description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTemplateTextExpansionFailures
         expr: 'increase(prometheus_template_text_expansion_failures_total[3m]) > 0'
         for: 0m
@@ -130,24 +85,6 @@ groups:
           summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
           description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusNotificationsBacklog
-        expr: 'min_over_time(prometheus_notifications_queue_length[10m]) > 0'
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus notifications backlog (instance {{ $labels.instance }})
-          description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-      - alert: PrometheusAlertmanagerNotificationFailing
-        expr: 'rate(alertmanager_notifications_failed_total[1m]) > 0'
-        for: 0m
-        labels:
-          severity: critical
-        annotations:
-          summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
-          description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTargetEmpty
         expr: 'prometheus_sd_discovered_targets == 0'
         for: 0m
@@ -166,24 +103,6 @@ groups:
           summary: Prometheus target scraping slow (instance {{ $labels.instance }})
           description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time. Your Prometheus server is under-provisioned.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusLargeScrape
-        expr: 'increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10'
-        for: 5m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus large scrape (instance {{ $labels.instance }})
-          description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-      - alert: PrometheusTargetScrapeDuplicate
-        expr: 'increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0'
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
-          description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTsdbCheckpointCreationFailures
         expr: 'increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0'
         for: 0m
@@ -202,15 +121,6 @@ groups:
           summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
           description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusTsdbCompactionsFailed
-        expr: 'increase(prometheus_tsdb_compactions_failed_total[1m]) > 0'
-        for: 0m
-        labels:
-          severity: critical
-        annotations:
-          summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
-          description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTsdbHeadTruncationsFailed
         expr: 'increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0'
         for: 0m
@@ -220,15 +130,6 @@ groups:
           summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
           description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
 
-      - alert: PrometheusTsdbReloadFailures
-        expr: 'increase(prometheus_tsdb_reloads_failures_total[1m]) > 0'
-        for: 0m
-        labels:
-          severity: critical
-        annotations:
-          summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
-          description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
       - alert: PrometheusTsdbWalCorruptions
         expr: 'increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0'
         for: 0m

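After a removal like this, the remaining rules can be checked for syntax errors with promtool, which ships with Prometheus (file path as in this commit):

promtool check rules prometheus/prometheus-extra.rules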