refactor(service_monitor): use job name "prometheus" for Prometheus
Required for the built-in Grafana dashboards to work. !8
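Background: the chart's alert rules and dashboards (which appear to come from the upstream prometheus mixin) filter on job="prometheus", while the ServiceMonitor previously produced a release-scoped job label (prometheus-{{ $.Release.Name }}). The ServiceMonitor hunk at the end of the diff pins the label with a static relabeling. A minimal sketch of the resulting endpoint config, assuming it sits under the ServiceMonitor's endpoints[].relabelings (the field name is not visible in the hunk context):

    relabelings:
      - sourceLabels:
          - __meta_kubernetes_pod_name
        targetLabel: instance
      - targetLabel: "job"
        replacement: "prometheus"

A quick post-deploy sanity check is a query such as up{job="prometheus", namespace="monitoring"} in the Prometheus UI, which should return the scraped Prometheus pod(s) once the relabeling is in effect.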
@@ -24,7 +24,7 @@ spec:
 expr: |
 # Without max_over_time, failed scrapes could create false negatives, see
 # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
-max_over_time(prometheus_config_last_reload_successful{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) == 0
+max_over_time(prometheus_config_last_reload_successful{job="prometheus",namespace="monitoring"}[5m]) == 0
 for: 10m
 labels:
 severity: critical
@@ -37,9 +37,9 @@ spec:
 # Without min_over_time, failed scrapes could create false negatives, see
 # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
 (
-predict_linear(prometheus_notifications_queue_length{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m], 60 * 30)
+predict_linear(prometheus_notifications_queue_length{job="prometheus",namespace="monitoring"}[5m], 60 * 30)
 >
-min_over_time(prometheus_notifications_queue_capacity{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+min_over_time(prometheus_notifications_queue_capacity{job="prometheus",namespace="monitoring"}[5m])
 )
 for: 15m
 labels:
@@ -51,9 +51,9 @@ spec:
 summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.
 expr: |
 (
-rate(prometheus_notifications_errors_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+rate(prometheus_notifications_errors_total{job="prometheus",namespace="monitoring"}[5m])
 /
-rate(prometheus_notifications_sent_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+rate(prometheus_notifications_sent_total{job="prometheus",namespace="monitoring"}[5m])
 )
 * 100
 > 1
@@ -68,7 +68,7 @@ spec:
 expr: |
 # Without max_over_time, failed scrapes could create false negatives, see
 # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
-max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) < 1
+max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus",namespace="monitoring"}[5m]) < 1
 for: 10m
 labels:
 severity: warning
@@ -78,7 +78,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbreloadsfailing
 summary: Prometheus has issues reloading blocks from disk.
 expr: |
-increase(prometheus_tsdb_reloads_failures_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[3h]) > 0
+increase(prometheus_tsdb_reloads_failures_total{job="prometheus",namespace="monitoring"}[3h]) > 0
 for: 4h
 labels:
 severity: warning
@@ -88,7 +88,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbcompactionsfailing
 summary: Prometheus has issues compacting blocks.
 expr: |
-increase(prometheus_tsdb_compactions_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[3h]) > 0
+increase(prometheus_tsdb_compactions_failed_total{job="prometheus",namespace="monitoring"}[3h]) > 0
 for: 4h
 labels:
 severity: warning
@@ -99,12 +99,12 @@ spec:
 summary: Prometheus is not ingesting samples.
 expr: |
 (
-rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) <= 0
+rate(prometheus_tsdb_head_samples_appended_total{job="prometheus",namespace="monitoring"}[5m]) <= 0
 and
 (
-sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}) > 0
+sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus",namespace="monitoring"}) > 0
 or
-sum without(rule_group) (prometheus_rule_group_rules{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}) > 0
+sum without(rule_group) (prometheus_rule_group_rules{job="prometheus",namespace="monitoring"}) > 0
 )
 )
 for: 10m
@@ -116,7 +116,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusduplicatetimestamps
 summary: Prometheus is dropping samples with duplicate timestamps.
 expr: |
-rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 10m
 labels:
 severity: warning
@@ -126,7 +126,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusoutofordertimestamps
 summary: Prometheus drops samples with out-of-order timestamps.
 expr: |
-rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 10m
 labels:
 severity: warning
@@ -137,12 +137,12 @@ spec:
 summary: Prometheus fails to send samples to remote storage.
 expr: |
 (
-(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
+(rate(prometheus_remote_storage_failed_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus",namespace="monitoring"}[5m]))
 /
 (
-(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
+(rate(prometheus_remote_storage_failed_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus",namespace="monitoring"}[5m]))
 +
-(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]))
+(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus",namespace="monitoring"}[5m]))
 )
 )
 * 100
@@ -159,9 +159,9 @@ spec:
 # Without max_over_time, failed scrapes could create false negatives, see
 # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
 (
-max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus",namespace="monitoring"}[5m])
 - ignoring(remote_name, url) group_right
-max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus",namespace="monitoring"}[5m])
 )
 > 120
 for: 15m
@@ -169,16 +169,16 @@ spec:
 severity: critical
 - alert: PrometheusRemoteWriteDesiredShards
 annotations:
-description: Prometheus {{ `{{` }}$labels.namespace}}/{{ `{{` }}$labels.pod}} remote write desired shards calculation wants to run {{ `{{` }} $value }} shards for queue {{ `{{` }} $labels.remote_name}}:{{ `{{` }} $labels.url }}, which is more than the max of {{ `{{` }} printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}` $labels.instance | query | first | value }}.
+description: Prometheus {{ `{{` }}$labels.namespace}}/{{ `{{` }}$labels.pod}} remote write desired shards calculation wants to run {{ `{{` }} $value }} shards for queue {{ `{{` }} $labels.remote_name}}:{{ `{{` }} $labels.url }}, which is more than the max of {{ `{{` }} printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus",namespace="monitoring"}` $labels.instance | query | first | value }}.
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritedesiredshards
 summary: Prometheus remote write desired shards calculation wants to run more than configured max shards.
 expr: |
 # Without max_over_time, failed scrapes could create false negatives, see
 # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
 (
-max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+max_over_time(prometheus_remote_storage_shards_desired{job="prometheus",namespace="monitoring"}[5m])
 >
-max_over_time(prometheus_remote_storage_shards_max{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m])
+max_over_time(prometheus_remote_storage_shards_max{job="prometheus",namespace="monitoring"}[5m])
 )
 for: 15m
 labels:
@@ -189,7 +189,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusrulefailures
 summary: Prometheus is failing rule evaluations.
 expr: |
-increase(prometheus_rule_evaluation_failures_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_rule_evaluation_failures_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: critical
@@ -199,7 +199,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusmissingruleevaluations
 summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
 expr: |
-increase(prometheus_rule_group_iterations_missed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_rule_group_iterations_missed_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: warning
@@ -209,7 +209,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetlimithit
 summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit.
 expr: |
-increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: warning
@@ -219,7 +219,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuslabellimithit
 summary: Prometheus has dropped targets because some scrape configs have exceeded the labels limit.
 expr: |
-increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: warning
@@ -229,7 +229,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapebodysizelimithit
 summary: Prometheus has dropped some targets that exceeded body size limit.
 expr: |
-increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: warning
@@ -239,7 +239,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapesamplelimithit
 summary: Prometheus has failed scrapes that have exceeded the configured sample limit.
 expr: |
-increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0
+increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus",namespace="monitoring"}[5m]) > 0
 for: 15m
 labels:
 severity: warning
@@ -249,7 +249,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetsyncfailure
 summary: Prometheus has failed to sync targets.
 expr: |
-increase(prometheus_target_sync_failed_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[30m]) > 0
+increase(prometheus_target_sync_failed_total{job="prometheus",namespace="monitoring"}[30m]) > 0
 for: 5m
 labels:
 severity: critical
@@ -259,7 +259,7 @@ spec:
 runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload
 summary: Prometheus is reaching its maximum capacity serving concurrent requests.
 expr: |
-avg_over_time(prometheus_engine_queries{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-{{ $.Release.Name }}",namespace="monitoring"}[5m]) > 0.8
+avg_over_time(prometheus_engine_queries{job="prometheus",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus",namespace="monitoring"}[5m]) > 0.8
 for: 15m
 labels:
 severity: warning
@@ -270,9 +270,9 @@ spec:
 summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
 expr: |
 min without (alertmanager) (
-rate(prometheus_notifications_errors_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring",alertmanager!~``}[5m])
+rate(prometheus_notifications_errors_total{job="prometheus",namespace="monitoring",alertmanager!~``}[5m])
 /
-rate(prometheus_notifications_sent_total{job="prometheus-{{ $.Release.Name }}",namespace="monitoring",alertmanager!~``}[5m])
+rate(prometheus_notifications_sent_total{job="prometheus",namespace="monitoring",alertmanager!~``}[5m])
 )
 * 100
 > 3

@@ -22,6 +22,8 @@ spec:
 sourceLabels:
 - __meta_kubernetes_pod_name
 targetLabel: instance
+- targetLabel: "job"
+  replacement: "prometheus"
 - interval: 30s
 port: reloader-web
 selector: