refactor(service_monitor): use the job name "prometheus" for Prometheus

Required for the built-in Grafana dashboards to work.

!8
2024-02-05 15:09:07 +09:30
parent a2c3daa44e
commit 486f2c4728
2 changed files with 33 additions and 31 deletions
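The bundled Grafana dashboards and alert rules select Prometheus's own metrics with job="prometheus", so the ServiceMonitor change below rewrites the job label to that fixed value instead of the release-specific prometheus-{{ $.Release.Name }}, and the rule file is updated to match. A minimal sketch of a ServiceMonitor endpoint performing this relabeling; the resource name, selector, port name, and interval are illustrative assumptions, only the relabeling itself comes from this commit:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus                         # hypothetical name, for illustration only
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: prometheus   # assumed selector
  endpoints:
    - port: web                            # assumed scrape port name
      interval: 30s
      relabelings:
        # Overwrite the job label so scraped metrics match what the bundled
        # Grafana dashboards and PrometheusRule alerts expect.
        - targetLabel: job
          replacement: prometheus

With this in place, expressions such as max_over_time(prometheus_config_last_reload_successful{job="prometheus",namespace="monitoring"}[5m]) in the rules below select the right series without templating the release name into the job matcher.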

View File

@@ -24,7 +24,7 @@ spec:
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(prometheus_config_last_reload_successful{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) == 0
max_over_time(prometheus_config_last_reload_successful{job="prometheus",namespace="monitoring"}[5m]) == 0
for: 10m
labels:
severity: critical
@@ -37,9 +37,9 @@ spec:
# Without min_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
predict_linear(prometheus_notifications_queue_length{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m], 60 * 30)
predict_linear(prometheus_notifications_queue_length{job="prometheus",namespace="monitoring"}[5m], 60 * 30)
>
min_over_time(prometheus_notifications_queue_capacity{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
min_over_time(prometheus_notifications_queue_capacity{job="prometheus",namespace="monitoring"}[5m])
)
for: 15m
labels:
@@ -51,9 +51,9 @@ spec:
summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.
expr: |
(
rate(prometheus_notifications_errors_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
rate(prometheus_notifications_errors_total{job="prometheus",namespace="monitoring"}[5m])
/
rate(prometheus_notifications_sent_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
rate(prometheus_notifications_sent_total{job="prometheus",namespace="monitoring"}[5m])
)
* 100
> 1
@@ -68,7 +68,7 @@ spec:
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) < 1
max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus",namespace="monitoring"}[5m]) < 1
for: 10m
labels:
severity: warning
@@ -78,7 +78,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbreloadsfailing
summary: Prometheus has issues reloading blocks from disk.
expr: |
increase(prometheus_tsdb_reloads_failures_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[3h]) > 0
increase(prometheus_tsdb_reloads_failures_total{job="prometheus",namespace="monitoring"}[3h]) > 0
for: 4h
labels:
severity: warning
@@ -88,7 +88,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbcompactionsfailing
summary: Prometheus has issues compacting blocks.
expr: |
increase(prometheus_tsdb_compactions_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[3h]) > 0
increase(prometheus_tsdb_compactions_failed_total{job="prometheus",namespace="monitoring"}[3h]) > 0
for: 4h
labels:
severity: warning
@@ -99,12 +99,12 @@ spec:
summary: Prometheus is not ingesting samples.
expr: |
(
rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) <= 0
rate(prometheus_tsdb_head_samples_appended_total{job="prometheus",namespace="monitoring"}[5m]) <= 0
and
(
sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}) > 0
sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus",namespace="monitoring"}) > 0
or
sum without(rule_group) (prometheus_rule_group_rules{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}) > 0
sum without(rule_group) (prometheus_rule_group_rules{job="prometheus",namespace="monitoring"}) > 0
)
)
for: 10m
@@ -116,7 +116,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusduplicatetimestamps
summary: Prometheus is dropping samples with duplicate timestamps.
expr: |
rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 10m
labels:
severity: warning
@@ -126,7 +126,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusoutofordertimestamps
summary: Prometheus drops samples with out-of-order timestamps.
expr: |
rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 10m
labels:
severity: warning
@@ -137,12 +137,12 @@ spec:
summary: Prometheus fails to send samples to remote storage.
expr: |
(
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus",namespace="monitoring"}[5m]))
/
(
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus",namespace="monitoring"}[5m]))
+
(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus",namespace="monitoring"}[5m]))
)
)
* 100
@@ -159,9 +159,9 @@ spec:
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus",namespace="monitoring"}[5m])
- ignoring(remote_name, url) group_right
max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus",namespace="monitoring"}[5m])
)
> 120
for: 15m
@@ -169,16 +169,16 @@ spec:
severity: critical
- alert: PrometheusRemoteWriteDesiredShards
annotations:
description: Prometheus {{ `{{` }}$labels.namespace}}/{{ `{{` }}$labels.pod}} remote write desired shards calculation wants to run {{ `{{` }} $value }} shards for queue {{ `{{` }} $labels.remote_name}}:{{ `{{` }} $labels.url }}, which is more than the max of {{ `{{` }} printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}` $labels.instance | query | first | value }}.
description: Prometheus {{ `{{` }}$labels.namespace}}/{{ `{{` }}$labels.pod}} remote write desired shards calculation wants to run {{ `{{` }} $value }} shards for queue {{ `{{` }} $labels.remote_name}}:{{ `{{` }} $labels.url }}, which is more than the max of {{ `{{` }} printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus",namespace="monitoring"}` $labels.instance | query | first | value }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritedesiredshards
summary: Prometheus remote write desired shards calculation wants to run more than configured max shards.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
max_over_time(prometheus_remote_storage_shards_desired{job="prometheus",namespace="monitoring"}[5m])
>
max_over_time(prometheus_remote_storage_shards_max{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
max_over_time(prometheus_remote_storage_shards_max{job="prometheus",namespace="monitoring"}[5m])
)
for: 15m
labels:
@@ -189,7 +189,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusrulefailures
summary: Prometheus is failing rule evaluations.
expr: |
increase(prometheus_rule_evaluation_failures_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_rule_evaluation_failures_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: critical
@@ -199,7 +199,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusmissingruleevaluations
summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
expr: |
increase(prometheus_rule_group_iterations_missed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_rule_group_iterations_missed_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
@@ -209,7 +209,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetlimithit
summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit.
expr: |
increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
@@ -219,7 +219,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuslabellimithit
summary: Prometheus has dropped targets because some scrape configs have exceeded the labels limit.
expr: |
increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
@@ -229,7 +229,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapebodysizelimithit
summary: Prometheus has dropped some targets that exceeded body size limit.
expr: |
increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
@@ -239,7 +239,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapesamplelimithit
summary: Prometheus has failed scrapes that have exceeded the configured sample limit.
expr: |
increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
@@ -249,7 +249,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetsyncfailure
summary: Prometheus has failed to sync targets.
expr: |
increase(prometheus_target_sync_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[30m]) > 0
increase(prometheus_target_sync_failed_total{job="prometheus",namespace="monitoring"}[30m]) > 0
for: 5m
labels:
severity: critical
@@ -259,7 +259,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload
summary: Prometheus is reaching its maximum capacity serving concurrent requests.
expr: |
avg_over_time(prometheus_engine_queries{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0.8
avg_over_time(prometheus_engine_queries{job="prometheus",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus",namespace="monitoring"}[5m]) > 0.8
for: 15m
labels:
severity: warning
@@ -270,9 +270,9 @@ spec:
summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
expr: |
min without (alertmanager) (
rate(prometheus_notifications_errors_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring",alertmanager!~``}[5m])
rate(prometheus_notifications_errors_total{job="prometheus",namespace="monitoring",alertmanager!~``}[5m])
/
rate(prometheus_notifications_sent_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring",alertmanager!~``}[5m])
rate(prometheus_notifications_sent_total{job="prometheus",namespace="monitoring",alertmanager!~``}[5m])
)
* 100
> 3

View File

@@ -22,6 +22,8 @@ spec:
sourceLabels:
- __meta_kubernetes_pod_name
targetLabel: instance
- targetLabel: "job"
replacement: "prometheus"
- interval: 30s
port: reloader-web
selector: