
01 Alert Rules Collection

Filesystem usage:

node_filesystem_free_bytes{mountpoint=~"(/|/u01)"}/node_filesystem_size_bytes{mountpoint=~"(/|/u01)"}*100
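Note that this expression returns the percentage of free space, not used space. A minimal sketch of alerting on usage instead (the alert name, the 90% threshold, and the severity are illustrative assumptions, not taken from this page):

  - alert: HostFilesystemUsageHigh
    expr: 100 - (node_filesystem_free_bytes{mountpoint=~"(/|/u01)"} / node_filesystem_size_bytes{mountpoint=~"(/|/u01)"} * 100) > 90
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Host filesystem usage high (instance {{ $labels.instance }})"
      description: "Filesystem usage on {{ $labels.mountpoint }} is above 90%\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"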

Prometheus

Reference: https://awesome-prometheus-alerts.grep.to/rules#host-and-hardware
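The entries below are bare alert definitions; to be loaded by Prometheus they have to live inside a rule group in a rule file referenced from prometheus.yml. A minimal sketch of the wrapping (the file path and group name are assumptions, not from this page):

# /etc/prometheus/rules/alerts.yml
groups:
  - name: prometheus-alerts
    rules:
      - alert: PrometheusTargetMissing
        expr: up == 0
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Prometheus target missing (instance {{ $labels.instance }})"

# prometheus.yml
rule_files:
  - "/etc/prometheus/rules/*.yml"

Rule files can be validated with promtool check rules /etc/prometheus/rules/alerts.yml before reloading Prometheus.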

  - alert: PrometheusTargetMissing
    expr: up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus target missing (instance {{ $labels.instance }})"
      description: "A Prometheus target has disappeared. An exporter might be crashed.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus configuration reload failure (instance {{ $labels.instance }})"
      description: "Prometheus configuration reload error\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"expose-prometheus-metrics|pushgateway|expose-alertmanager-metrics"}[15m]) > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus too many restarts (instance {{ $labels.instance }})"
      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})"
      description: "AlertManager configuration reload error\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusAlertmanagerConfigNotSynced
    expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus AlertManager config not synced (instance {{ $labels.instance }})"
      description: "Configurations of AlertManager cluster instances are out of sync\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"


  - alert: PrometheusNotConnectedToAlertmanager
    expr: prometheus_notifications_alertmanagers_discovered < 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus not connected to alertmanager (instance {{ $labels.instance }})"
      description: "Prometheus cannot connect the alertmanager\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusRuleEvaluationFailures
    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus rule evaluation failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTemplateTextExpansionFailures
    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus template text expansion failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} template text expansion failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusRuleEvaluationSlow
    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus rule evaluation slow (instance {{ $labels.instance }})"
      description: "Prometheus rule evaluation took more time than the scheduled interval. I indicates a slower storage backend access or too complex query.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus notifications backlog (instance {{ $labels.instance }})"
      description: "The Prometheus notification queue has not been empty for 10 minutes\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus AlertManager notification failing (instance {{ $labels.instance }})"
      description: "Alertmanager is failing sending notifications\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"



  - alert: PrometheusTargetScrapingSlow
    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus target scraping slow (instance {{ $labels.instance }})"
      description: "Prometheus is scraping exporters slowly\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusLargeScrape
    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus large scrape (instance {{ $labels.instance }})"
      description: "Prometheus has many scrapes that exceed the sample limit\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTargetScrapeDuplicate
    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus target scrape duplicate (instance {{ $labels.instance }})"
      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB compactions failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB head truncations failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB reload failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbWalCorruptions
    expr: increase(prometheus_tsdb_wal_corruptions_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: PrometheusTsdbWalTruncationsFailed
    expr: increase(prometheus_tsdb_wal_truncations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlDown
    expr: mysql_up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL down (instance {{ $labels.instance }})"
      description: "MySQL instance is down on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlTooManyConnections
    expr: avg by (instance) (max_over_time(mysql_global_status_threads_connected[5m])) / avg by (instance) (mysql_global_variables_max_connections) * 100 > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL too many connections (instance {{ $labels.instance }})"
      description: "More than 80% of MySQL connections are in use on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlHighThreadsRunning
    expr: avg by (instance) (max_over_time(mysql_global_status_threads_running[5m])) / avg by (instance) (mysql_global_variables_max_connections) * 100 > 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL high threads running (instance {{ $labels.instance }})"
      description: "More than 60% of MySQL connections are in running state on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlSlaveIoThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_io_running == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL Slave IO thread not running (instance {{ $labels.instance }})"
      description: "MySQL Slave IO thread not running on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlSlaveSqlThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_sql_running == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL Slave SQL thread not running (instance {{ $labels.instance }})"
      description: "MySQL Slave SQL thread not running on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlSlaveReplicationLag
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) (mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay) > 300
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL Slave replication lag (instance {{ $labels.instance }})"
      description: "MysqL replication lag on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlSlowQueries
    expr: increase(mysql_global_status_slow_queries[1m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL slow queries (instance {{ $labels.instance }})"
      description: "MySQL server is having some slow queries.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: MysqlRestarted
    expr: mysql_global_status_uptime < 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL restarted (instance {{ $labels.instance }})"
      description: "MySQL has just been restarted, less than one minute ago on {{ $labels.instance }}.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KafkaTopicsReplicas
    expr: sum(kafka_topic_partition_in_sync_replica) by (topic) < 3
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kafka topics replicas (instance {{ $labels.instance }})"
      description: "Kafka topic in-sync partition\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KafkaConsumersGroup
    expr: sum(kafka_consumergroup_lag) by (consumergroup) > 50
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kafka consumers group (instance {{ $labels.instance }})"
      description: "Kafka consumers group\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: NginxHighHttp4xxErrorRate
    expr: sum(rate(nginx_http_requests_total{status=~"^4.."}[1m])) / sum(rate(nginx_http_requests_total[1m])) * 100 > 5
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Nginx high HTTP 4xx error rate (instance {{ $labels.instance }})"
      description: "Too many HTTP requests with status 4xx (> 5%)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: NginxHighHttp5xxErrorRate
    expr: sum(rate(nginx_http_requests_total{status=~"^5.."}[1m])) / sum(rate(nginx_http_requests_total[1m])) * 100 > 5
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Nginx high HTTP 5xx error rate (instance {{ $labels.instance }})"
      description: "Too many HTTP requests with status 5xx (> 5%)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: NginxLatencyHigh
    expr: histogram_quantile(0.99, sum(rate(nginx_http_request_duration_seconds_bucket[30m])) by (host, node)) > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Nginx latency high (instance {{ $labels.instance }})"
      description: "Nginx p99 latency is higher than 10 seconds\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesNodeReady
    expr: kube_node_status_condition{condition="Ready",status="true"} == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Node ready (instance {{ $labels.instance }})"
      description: "Node {{ $labels.node }} has been unready for a long time\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesMemoryPressure
    expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes memory pressure (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has MemoryPressure condition\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesDiskPressure
    expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes disk pressure (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has DiskPressure condition\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesOutOfDisk
    expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes out of disk (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has OutOfDisk condition\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesJobFailed
    expr: kube_job_status_failed > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Job failed (instance {{ $labels.instance }})"
      description: "Job {{$labels.namespace}}/{{$labels.exported_job}} failed to complete\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesCronjobSuspended
    expr: kube_cronjob_spec_suspend != 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes CronJob suspended (instance {{ $labels.instance }})"
      description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesPersistentvolumeclaimPending
    expr: kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes PersistentVolumeClaim pending (instance {{ $labels.instance }})"
      description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesVolumeOutOfDiskSpace
    expr: kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Volume out of disk space (instance {{ $labels.instance }})"
      description: "Volume is almost full (< 10% left)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesVolumeFullInFourDays
    expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Volume full in four days (instance {{ $labels.instance }})"
      description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesPersistentvolumeError
    expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes PersistentVolume error (instance {{ $labels.instance }})"
      description: "Persistent volume is in bad state\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesStatefulsetDown
    expr: (kube_statefulset_status_replicas_ready / kube_statefulset_status_replicas_current) != 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet down (instance {{ $labels.instance }})"
      description: "A StatefulSet went down\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesHpaScalingAbility
    expr: kube_hpa_status_condition{condition="false", status="AbleToScale"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA scaling ability (instance {{ $labels.instance }})"
      description: "Pod is unable to scale\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesHpaMetricAvailability
    expr: kube_hpa_status_condition{condition="false", status="ScalingActive"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA metric availability (instance {{ $labels.instance }})"
      description: "HPA is not able to colelct metrics\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesHpaScaleCapability
    expr: kube_hpa_status_desired_replicas >= kube_hpa_spec_max_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA scale capability (instance {{ $labels.instance }})"
      description: "The maximum number of desired Pods has been hit\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesPodNotHealthy
    expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[1h:]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Pod not healthy (instance {{ $labels.instance }})"
      description: "Pod has been in a non-ready state for longer than an hour.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesPodCrashLooping
    expr: rate(kube_pod_container_status_restarts_total[15m]) * 60 * 5 > 5
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes pod crash looping (instance {{ $labels.instance }})"
      description: "Pod {{ $labels.pod }} is crash looping\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesReplicassetMismatch
    expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes ReplicasSet mismatch (instance {{ $labels.instance }})"
      description: "Deployment Replicas mismatch\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesDeploymentReplicasMismatch
    expr: kube_deployment_spec_replicas != kube_deployment_status_replicas_available
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Deployment replicas mismatch (instance {{ $labels.instance }})"
      description: "Deployment Replicas mismatch\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesStatefulsetReplicasMismatch
    expr: kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})"
      description: "A StatefulSet has not matched the expected number of replicas for longer than 15 minutes.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesDeploymentGenerationMismatch
    expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Deployment generation mismatch (instance {{ $labels.instance }})"
      description: "A Deployment has failed but has not been rolled back.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesStatefulsetGenerationMismatch
    expr: kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet generation mismatch (instance {{ $labels.instance }})"
      description: "A StatefulSet has failed but has not been rolled back.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesStatefulsetUpdateNotRolledOut
    expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated)
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet update not rolled out (instance {{ $labels.instance }})"
      description: "StatefulSet update has not been rolled out.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesDaemonsetRolloutStuck
    expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100 < 100 or kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})"
      description: "Some Pods of DaemonSet are not scheduled or not ready\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesDaemonsetMisscheduled
    expr: kube_daemonset_status_number_misscheduled > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})"
      description: "Some DaemonSet Pods are running where they are not supposed to run\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesCronjobTooLong
    expr: time() - kube_cronjob_next_schedule_time > 3600
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes CronJob too long (instance {{ $labels.instance }})"
      description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesJobCompletion
    expr: kube_job_spec_completions - kube_job_status_succeeded > 0 or kube_job_status_failed > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes job completion (instance {{ $labels.instance }})"
      description: "Kubernetes Job failed to complete\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesApiServerErrors
    expr: sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[2m])) / sum(rate(apiserver_request_count{job="apiserver"}[2m])) * 100 > 3
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes API server errors (instance {{ $labels.instance }})"
      description: "Kubernetes API server is experiencing high error rate\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesApiClientErrors
    expr: (sum(rate(rest_client_requests_total{code=~"(4|5).."}[2m])) by (instance, job) / sum(rate(rest_client_requests_total[2m])) by (instance, job)) * 100 > 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes API client errors (instance {{ $labels.instance }})"
      description: "Kubernetes API client is experiencing high error rate\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesClientCertificateExpiresNextWeek
    expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes client certificate expires next week (instance {{ $labels.instance }})"
      description: "A client certificate used to authenticate to the apiserver is expiring next week.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesClientCertificateExpiresSoon
    expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 24*60*60
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes client certificate expires soon (instance {{ $labels.instance }})"
      description: "A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: KubernetesApiServerLatency
    expr: histogram_quantile(0.99, sum(apiserver_request_latencies_bucket{verb!~"CONNECT|WATCHLIST|WATCH|PROXY"}) WITHOUT (instance, resource)) / 1e+06 > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes API server latency (instance {{ $labels.instance }})"
      description: "Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephState
    expr: ceph_health_status != 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph State (instance {{ $labels.instance }})"
      description: "Ceph instance unhealthy\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephMonitorClockSkew
    expr: abs(ceph_monitor_clock_skew_seconds) > 0.2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph monitor clock skew (instance {{ $labels.instance }})"
      description: "Ceph monitor clock skew detected. Please check ntp and hardware clock settings\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephMonitorLowSpace
    expr: ceph_monitor_avail_percent < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph monitor low space (instance {{ $labels.instance }})"
      description: "Ceph monitor storage is low.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephOsdDown
    expr: ceph_osd_up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph OSD Down (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon Down\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephHighOsdLatency
    expr: ceph_osd_perf_apply_latency_seconds > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph high OSD latency (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon latetncy is high. Please check if it doesn't stuck in weird state.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephOsdLowSpace
    expr: ceph_osd_utilization > 90
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph OSD low space (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon is going out of space. Please add more disks.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephOsdReweighted
    expr: ceph_osd_weight < 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph OSD reweighted (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon take ttoo much time to resize.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgDown
    expr: ceph_pg_down > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG down (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are down. Please ensure that all the data are available.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgIncomplete
    expr: ceph_pg_incomplete > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG incomplete (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are incomplete. Please ensure that all the data are available.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgInconsistant
    expr: ceph_pg_inconsistent > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG inconsistant (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are inconsitent. Data is available but inconsistent across nodes.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgActivationLong
    expr: ceph_pg_activating > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG activation long (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are too long to activate.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgBackfillFull
    expr: ceph_pg_backfill_toofull > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG backfill full (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are located on full Object Storage Daemon on cluster. Those PGs can be unavailable shortly. Please check OSDs, change weight or reconfigure CRUSH rules.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: CephPgUnavailable
    expr: ceph_pg_total - ceph_pg_active > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG unavailable (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are unavailable.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: ThanosCompactionHalted
    expr: thanos_compactor_halted == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compaction halted (instance {{ $labels.instance }})"
      description: "Thanos compaction has failed to run and is now halted.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: ThanosCompactBucketOperationFailure
    expr: rate(thanos_objstore_bucket_operation_failures_total[1m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compact bucket operation failure (instance {{ $labels.instance }})"
      description: "Thanos compaction has failing storage operations\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: ThanosCompactNotRun
    expr: (time() - thanos_objstore_bucket_last_successful_upload_time) > 24*60*60
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compact not run (instance {{ $labels.instance }})"
      description: "Thanos compaction has not run in 24 hours.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

Node monitoring

    - alert: HostOutOfMemory
      expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
      # expr: (1 - sum by(instance) (node_memory_MemAvailable_bytes) / sum by(instance) (node_memory_MemTotal_bytes)) * 100 >= 80
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host out of memory (instance {{ $labels.instance }})"
        description: "Node memory is filling up (< 10% left)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostMemoryUnderMemoryPressure
      expr: rate(node_vmstat_pgmajfault[5m]) > 1000
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host memory under memory pressure (instance {{ $labels.instance }})"
        description: "The node is under heavy memory pressure. High rate of major page faults\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualNetworkThroughputIn
      expr: sum by (instance) (irate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual network throughput in (instance {{ $labels.instance }})"
        description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualNetworkThroughputOut
      expr: sum by (instance) (irate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual network throughput out (instance {{ $labels.instance }})"
        description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualDiskReadRate
      expr: sum by (instance) (irate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual disk read rate (instance {{ $labels.instance }})"
        description: "Disk is probably reading too much data (> 50 MB/s)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualDiskWriteRate
      expr: sum by (instance) (irate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual disk write rate (instance {{ $labels.instance }})"
        description: "Disk is probably writing too much data (> 50 MB/s)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostOutOfDiskSpace
      expr: node_filesystem_avail_bytes{mountpoint=~"(/|/u01)"} / node_filesystem_size_bytes{mountpoint=~"(/|/u01)"} * 100 < 10
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host out of disk space (instance {{ $labels.instance }})"
        description: "Disk is almost full (< 10% left)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostDiskWillFillIn24Hours
      expr: predict_linear(node_filesystem_free_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host disk will fill in 24 hours (instance {{ $labels.instance }})"
        description: "Disk will fill in 24 hours at current write rate\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostOutOfInodes
      expr: node_filesystem_files_free{mountpoint=~"(/|/u01)"} / node_filesystem_files{mountpoint=~"(/|/u01)"} * 100 < 10
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host out of inodes (instance {{ $labels.instance }})"
        description: "Disk is almost running out of available inodes (< 10% left)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualDiskReadLatency
      expr: rate(node_disk_read_time_seconds_total[2m]) / rate(node_disk_reads_completed_total[2m]) > 0.1
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual disk read latency (instance {{ $labels.instance }})"
        description: "Disk latency is growing (read operations > 100ms)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostUnusualDiskWriteLatency
      expr: rate(node_disk_write_time_seconds_total[2m]) / rate(node_disk_writes_completed_total[2m]) > 0.1
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host unusual disk write latency (instance {{ $labels.instance }})"
        description: "Disk latency is growing (write operations > 100ms)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostHighCpuLoad
      expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
      #expr: sum by(node) (node_load1) / sum by(node) (machine_cpu_cores) * 100 > 100
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host high CPU load (instance {{ $labels.instance }})"
        description: "CPU load is > 80%\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    # 1000 context switches is an arbitrary number.
    # Alert threshold depends on nature of application.
    # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
    - alert: HostContextSwitching
      expr: (rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 1000
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host context switching (instance {{ $labels.instance }})"
        description: "Context switching is growing on node (> 1000 / s)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostSwapIsFillingUp
      expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host swap is filling up (instance {{ $labels.instance }})"
        description: "Swap is filling up (>80%)\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostSystemdServiceCrashed
      expr: node_systemd_unit_state{state="failed"} == 1
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host SystemD service crashed (instance {{ $labels.instance }})"
        description: "SystemD service crashed\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostPhysicalComponentTooHot
      expr: node_hwmon_temp_celsius > 75
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host physical component too hot (instance {{ $labels.instance }})"
        description: "Physical hardware component too hot\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostNodeOvertemperatureAlarm
      expr: node_hwmon_temp_alarm == 1
      for: 5m
      labels:
        severity: critical
      annotations:
        summary: "Host node overtemperature alarm (instance {{ $labels.instance }})"
        description: "Physical node temperature alarm triggered\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

    - alert: HostRaidArrayGotInactive
      expr: node_md_state{state="inactive"} > 0
      for: 5m
      labels:
        severity: critical
      annotations:
        summary: "Host RAID array got inactive (instance {{ $labels.instance }})"
        description: "RAID array {{ $labels.device }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"


    - alert: HostKernelVersionDeviations
      expr: count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+).[0-9]+.*")) by (kernel)) > 1
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host kernel version deviations (instance {{ $labels.instance }})"
        description: "Different kernel versions are running\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"



    - alert: HostNetworkReceiveErrors
      expr: increase(node_network_receive_errs_total[5m]) > 0
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host Network Receive Errors (instance {{ $labels.instance }})"
        description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last five minutes.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}'

    - alert: HostNetworkTransmitErrors
      expr: increase(node_network_transmit_errs_total[5m]) > 0
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: "Host Network Transmit Errors (instance {{ $labels.instance }})"
        description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last five minutes.\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}'

Blackbox
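The probe_* metrics used in this section come from the Blackbox exporter, which Prometheus scrapes through its /probe endpoint. A minimal scrape-config sketch for populating them (the job name, module, target URL, and exporter address 127.0.0.1:9115 are assumptions):

scrape_configs:
  - job_name: blackbox-http
    metrics_path: /probe
    params:
      module: [http_2xx]           # module must be defined in blackbox.yml
    static_configs:
      - targets:
          - https://example.com    # endpoint to probe (placeholder)
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: 127.0.0.1:9115  # address of the blackbox exporter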

  - alert: BlackboxProbeFailed
    expr: probe_success == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Blackbox probe failed (instance {{ $labels.instance }})"
      description: "Probe failed\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxSlowProbe
    expr: avg_over_time(probe_duration_seconds[1m]) > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow probe (instance {{ $labels.instance }})"
      description: "Blackbox probe took more than 1s to complete\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxProbeHttpFailure
    expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Blackbox probe HTTP failure (instance {{ $labels.instance }})"
      description: "HTTP status code is not 200-399\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxSslCertificateWillExpireSoon
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 15
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})"
      description: "SSL certificate expires in 15 days\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxSslCertificateExpired
    expr: probe_ssl_earliest_cert_expiry - time() <= 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Blackbox SSL certificate expired (instance {{ $labels.instance }})"
      description: "SSL certificate has expired already\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxProbeSlowHttp
    expr: avg_over_time(probe_http_duration_seconds[1m]) > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox probe slow HTTP (instance {{ $labels.instance }})"
      description: "HTTP request took more than 1s\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: BlackboxProbeSlowPing
    expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox probe slow ping (instance {{ $labels.instance }})"
      description: "Blackbox ping took more than 1s\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

MySQL

Redis
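This section is still empty. As a minimal starting point, assuming the standard redis_exporter is running (it exposes redis_up), a basic availability rule could look like:

  - alert: RedisDown
    expr: redis_up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Redis down (instance {{ $labels.instance }})"
      description: "Redis instance is down\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"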

etcd

  - alert: EtcdNoLeader
    expr: etcd_server_has_leader != 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Etcd no Leader (instance {{ $labels.instance }})"
      description: "Etcd cluster have no leader\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdHighNumberOfLeaderChanges
    expr: increase(etcd_server_leader_changes_seen_total[1h]) > 3
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd high number of leader changes (instance {{ $labels.instance }})"
      description: "Etcd leader changed more than 3 times during last hour\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdHighNumberOfFailedGrpcRequests
    expr: sum(rate(grpc_server_handled_total{grpc_code!="OK"}[5m])) BY (grpc_service, grpc_method) / sum(rate(grpc_server_handled_total[5m])) BY (grpc_service, grpc_method) > 0.01
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd high number of failed GRPC requests (instance {{ $labels.instance }})"
      description: "More than 1% GRPC request failure detected in Etcd for 5 minutes\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdMemberCommunicationSlow
    expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m])) > 0.15
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd member communication slow (instance {{ $labels.instance }})"
      description: "Etcd member communication slowing down, 99th percentil is over 0.15s for 5 minutes\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdHighNumberOfFailedProposals
    expr: increase(etcd_server_proposals_failed_total[1h]) > 5
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd high number of failed proposals (instance {{ $labels.instance }})"
      description: "Etcd server got more than 5 failed proposals past hour\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdHighFsyncDurations
    expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) > 0.5
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd high fsync durations (instance {{ $labels.instance }})"
      description: "Etcd WAL fsync duration increasing, 99th percentil is over 0.5s for 5 minutes\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

  - alert: EtcdHighCommitDurations
    expr: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) > 0.25
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Etcd high commit durations (instance {{ $labels.instance }})"
      description: "Etcd commit duration increasing, 99th percentil is over 0.25s for 5 minutes\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"

CoreDNS

  - alert: CorednsPanicCount
    expr: increase(coredns_panic_count_total[10m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "CoreDNS Panic Count (instance {{ $labels.instance }})"
      description: "Number of CoreDNS panics encountered\n  VALUE = {{ $value }}\n  LABELS: {{ $labels }}"