Alerts


/etc/prometheus/alert_loadbalancing.yml > lowpref
LowGatewayPreference (0 active)
alert: LowGatewayPreference
expr: gw_loadbalancing_pref{segment="1"}
  < 10
for: 1d
labels:
  severity: page
annotations:
  summary: |
    {{ $labels.gateway }} has low gateway preference ({{ $value }})
/etc/prometheus/alerts/blackbox-exporter.yml > BlackboxExporter
BlackboxConfigurationReloadFailure (0 active)
alert: BlackboxConfigurationReloadFailure
expr: blackbox_exporter_config_last_reload_successful
  != 1
labels:
  severity: warning
annotations:
  description: |-
    Blackbox configuration reload failure
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
BlackboxProbeFailed (0 active)
alert: BlackboxProbeFailed
expr: probe_success{job!~"node_pve01|blackbox_tls_pve01"}
  == 0
for: 10m
labels:
  severity: critical
annotations:
  description: |-
    Probe failed
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe failed (instance {{ $labels.instance }})
BlackboxProbeSlowHttp (0 active)
alert: BlackboxProbeSlowHttp
expr: avg_over_time(probe_http_duration_seconds[1m])
  > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    HTTP request took more than 1s
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
BlackboxProbeSlowPing (0 active)
alert: BlackboxProbeSlowPing
expr: avg_over_time(probe_icmp_duration_seconds[1m])
  > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    Blackbox ping took more than 1s
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe slow ping (instance {{ $labels.instance }})
BlackboxSslCertificateExpired (0 active)
alert: BlackboxSslCertificateExpired
expr: round((last_over_time(probe_ssl_earliest_cert_expiry[10m])
  - time()) / 86400, 0.1) < 0
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate has expired already
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
BlackboxSslCertificateWillExpireSoon (0 active)
alert: BlackboxSslCertificateWillExpireSoon
expr: 3
  <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400,
  0.1) < 20
labels:
  severity: warning
annotations:
  description: |-
    SSL certificate expires in less than 20 days
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance
    }})
BlackboxSslCertificateWillExpireSoon (0 active)
alert: BlackboxSslCertificateWillExpireSoon
expr: 0
  <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400,
  0.1) < 3
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate expires in less than 3 days
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance
    }})
/etc/prometheus/alerts/node-exporter.yml > NodeExporter
HostNetworkInterfaceSaturated (1 active)
alert: HostNetworkInterfaceSaturated
expr: ((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m])
  + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m]))
  / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}
  > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Interface Saturated (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostNetworkInterfaceSaturated" device="bb11" instance="gw01n03" job="node_gateways" nodename="gw01n03" severity="warning" firing 2025-04-03 09:44:17.079934382 +0000 UTC 0.9783954844444445
HostOutOfDiskSpace (5 active)
alert: HostOutOfDiskSpace
expr: ((node_filesystem_avail_bytes
  * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint)
  node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost full (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of disk space (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostOutOfDiskSpace" device="zp_pve/subvol-3226-disk-0" fstype="zfs" instance="ffs11" job="node_gateways" mountpoint="/zp_pve/subvol-3226-disk-0" nodename="ffs11" severity="warning" firing 2025-03-31 20:03:47.079934382 +0000 UTC 0
alertname="HostOutOfDiskSpace" device="zp_pve/subvol-3226-disk-0" fstype="zfs" instance="ffs05" job="node_gateways" mountpoint="/zp_pve/subvol-3226-disk-0" nodename="ffs05" severity="warning" firing 2025-03-31 20:03:47.079934382 +0000 UTC 0.008544921875
alertname="HostOutOfDiskSpace" device="rpool/data/subvol-8194-disk-0" fstype="zfs" instance="ffs13" job="node_gateways" mountpoint="/rpool/data/subvol-8194-disk-0" nodename="ffs13" severity="warning" firing 2025-03-31 04:00:32.079934382 +0000 UTC 7.9193115234375
alertname="HostOutOfDiskSpace" device="rpool/data/subvol-8194-disk-1" fstype="zfs" instance="ffs13" job="node_gateways" mountpoint="/rpool/data/subvol-8194-disk-1" nodename="ffs13" severity="warning" firing 2025-03-15 03:04:02.079934382 +0000 UTC 4.9297707297585225
alertname="HostOutOfDiskSpace" device="rpool/data/subvol-8195-disk-0" fstype="zfs" instance="ffs13" job="node_gateways" mountpoint="/rpool/data/subvol-8195-disk-0" nodename="ffs13" severity="warning" firing 2025-03-16 04:43:32.079934382 +0000 UTC 2.0660923549107144
HostOutOfInodes (2 active)
alert: HostOutOfInodes
expr: (node_filesystem_files_free{fstype!="msdosfs"}
  / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and on (instance,
  device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost running out of available inodes (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of inodes (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostOutOfInodes" device="zp_pve/subvol-3226-disk-0" fstype="zfs" instance="ffs11" job="node_gateways" mountpoint="/zp_pve/subvol-3226-disk-0" nodename="ffs11" severity="warning" firing 2025-03-31 20:03:47.079934382 +0000 UTC 0
alertname="HostOutOfInodes" device="zp_pve/subvol-3226-disk-0" fstype="zfs" instance="ffs05" job="node_gateways" mountpoint="/zp_pve/subvol-3226-disk-0" nodename="ffs05" severity="warning" firing 2025-03-31 20:03:47.079934382 +0000 UTC 0.17443491157225793
HostRequiresReboot (5 active)
alert: HostRequiresReboot
expr: (node_reboot_required
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 4h
labels:
  severity: info
annotations:
  description: |-
    {{ $labels.instance }} requires a reboot.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host requires reboot (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostRequiresReboot" instance="monitor01" job="node" nodename="monitor01" severity="info" firing 2025-03-16 05:52:47.079934382 +0000 UTC 1
alertname="HostRequiresReboot" instance="ffs08" job="node_gateways" nodename="ffs08" severity="info" firing 2025-01-16 18:11:47 +0000 UTC 1
alertname="HostRequiresReboot" instance="ffs13" job="node_gateways" nodename="ffs13" severity="info" firing 2025-01-13 18:50:17 +0000 UTC 1
alertname="HostRequiresReboot" instance="gw09n04" job="node_gateways" nodename="gw09n04" severity="info" firing 2025-03-25 06:40:32.079934382 +0000 UTC 1
alertname="HostRequiresReboot" instance="ffs05" job="node_gateways" nodename="ffs05" severity="info" firing 2025-03-31 20:03:47.079934382 +0000 UTC 1
HostUnusualDiskIo (4 active)
alert: HostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m])
  > 0.5) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk IO (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostUnusualDiskIo" device="sdf" instance="gw05n02" job="node_gateways" nodename="gw05n02" severity="warning" firing 2025-04-03 09:35:32.079934382 +0000 UTC 0.545422222222098
alertname="HostUnusualDiskIo" device="sdi" instance="gw05n02" job="node_gateways" nodename="gw05n02" severity="warning" firing 2025-04-03 09:39:47.079934382 +0000 UTC 0.5374444444430991
alertname="HostUnusualDiskIo" device="sdq" instance="gw05n02" job="node_gateways" nodename="gw05n02" severity="warning" firing 2025-04-03 09:39:47.079934382 +0000 UTC 0.5530222222208976
alertname="HostUnusualDiskIo" device="sdm" instance="gw05n02" job="node_gateways" nodename="gw05n02" severity="warning" firing 2025-04-03 09:39:47.079934382 +0000 UTC 0.5399333333337886
HostUnusualNetworkThroughputIn (5 active)
alert: HostUnusualNetworkThroughputIn
expr: (sum
  by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100)
  * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Host network interfaces are probably receiving too much data (> 100 MB/s)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual network throughput in (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostUnusualNetworkThroughputIn" instance="ffs13" nodename="ffs13" severity="warning" firing 2025-04-03 05:19:32.079934382 +0000 UTC 234.45386663164405
alertname="HostUnusualNetworkThroughputIn" instance="ffs08" nodename="ffs08" severity="warning" firing 2025-02-04 15:35:17.079934382 +0000 UTC 557.418238721575
alertname="HostUnusualNetworkThroughputIn" instance="core02-z10a" nodename="core02-z10a" severity="warning" firing 2025-04-03 06:56:47.079934382 +0000 UTC 131.98826498551801
alertname="HostUnusualNetworkThroughputIn" instance="core01-z10a" nodename="core01-z10a" severity="warning" firing 2025-03-16 05:21:17.079934382 +0000 UTC 431.5261819673621
alertname="HostUnusualNetworkThroughputIn" instance="gw09n04" nodename="gw09n04" severity="warning" firing 2025-04-03 06:17:02.079934382 +0000 UTC 135.635642914545
HostUnusualNetworkThroughputOut (4 active)
alert: HostUnusualNetworkThroughputOut
expr: (sum
  by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100)
  * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Host network interfaces are probably sending too much data (> 100 MB/s)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual network throughput out (instance {{ $labels.instance }})
Labels State Active Since Value
alertname="HostUnusualNetworkThroughputOut" instance="core01-z10a" nodename="core01-z10a" severity="warning" firing 2025-04-03 05:00:47.079934382 +0000 UTC 217.7182387973951
alertname="HostUnusualNetworkThroughputOut" instance="ffs08" nodename="ffs08" severity="warning" firing 2025-02-04 15:35:17.079934382 +0000 UTC 563.9806868598575
alertname="HostUnusualNetworkThroughputOut" instance="gw09n04" nodename="gw09n04" severity="warning" firing 2025-04-03 04:09:32.079934382 +0000 UTC 385.26989159356975
alertname="HostUnusualNetworkThroughputOut" instance="ffs13" nodename="ffs13" severity="warning" firing 2025-04-03 05:19:32.079934382 +0000 UTC 247.77873088291716
HostClockNotSynchronising (0 active)
alert: HostClockNotSynchronising
expr: (min_over_time(node_timex_sync_status[1m])
  == 0 and node_timex_maxerror_seconds >= 16) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Clock not synchronising. Ensure NTP is configured on this host.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host clock not synchronising (instance {{ $labels.instance }})
HostClockSkew (0 active)
alert: HostClockSkew
expr: ((node_timex_offset_seconds
  > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds
  < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host clock skew (instance {{ $labels.instance }})
HostConntrackLimit (0 active)
alert: HostConntrackLimit
expr: (node_nf_conntrack_entries
  / node_nf_conntrack_entries_limit > 0.8) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    The number of conntrack is approaching limit
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host conntrack limit (instance {{ $labels.instance }})
HostCpuStealNoisyNeighbor (0 active)
alert: HostCpuStealNoisyNeighbor
expr: (avg
  by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 >
  10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
HostDiskWillFillIn24Hours (0 active)
alert: HostDiskWillFillIn24Hours
expr: ((node_filesystem_avail_bytes
  * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint)
  predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600)
  < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on
  (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of space within the next 24 hours at current write rate
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
HostEdacCorrectableErrorsDetected (0 active)
alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: info
annotations:
  description: |-
    Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last minute.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
HostEdacUncorrectableErrorsDetected (0 active)
alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} has {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in total.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
HostFilesystemDeviceError (0 active)
alert: HostFilesystemDeviceError
expr: node_filesystem_device_error
  == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host filesystem device error (instance {{ $labels.instance }})
HostInodesWillFillIn24Hours (0 active)
alert: HostInodesWillFillIn24Hours
expr: (node_filesystem_files_free{fstype!="msdosfs"}
  / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h],
  24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"}
  == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of inodes within the next 24 hours at current write rate
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
HostKernelVersionDeviations (0 active)
alert: HostKernelVersionDeviations
expr: (count(sum
  by (kernel) (label_replace(node_uname_info, "kernel", "$1", "release",
  "([0-9]+\\.[0-9]+\\.[0-9]+).*"))) > 1) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 6h
labels:
  severity: warning
annotations:
  description: |-
    Different kernel versions are running
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host kernel version deviations (instance {{ $labels.instance }})
HostMemoryUnderMemoryPressure (0 active)
alert: HostMemoryUnderMemoryPressure
expr: (rate(node_vmstat_pgmajfault[1m])
  > 1000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    The node is under heavy memory pressure. High rate of major page faults
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host memory under memory pressure (instance {{ $labels.instance }})
HostNetworkBondDegraded (0 active)
alert: HostNetworkBondDegraded
expr: ((node_bonding_active
  - node_bonding_slaves) != 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Bond Degraded (instance {{ $labels.instance }})
HostNetworkReceiveErrors (0 active)
alert: HostNetworkReceiveErrors
expr: (rate(node_network_receive_errs_total[2m])
  / rate(node_network_receive_packets_total[2m]) > 0.01) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has a receive error ratio of {{ printf "%.2f" $value }} over the last two minutes.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Receive Errors (instance {{ $labels.instance }})
HostNetworkTransmitErrors (0 active)
alert: HostNetworkTransmitErrors
expr: (rate(node_network_transmit_errs_total{device!~"^g09n03abbtesta|^g09n03amobrtra|^g09n03bbbtestb"}[2m])
  / rate(node_network_transmit_packets_total[2m]) > 0.01) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has a transmit error ratio of {{ printf "%.2f" $value }} over the last two minutes.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Transmit Errors (instance {{ $labels.instance }})
HostNodeOvertemperatureAlarm (0 active)
alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius
  == 1) or (node_hwmon_temp_alarm == 1)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    Physical node temperature alarm triggered
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host node overtemperature alarm (instance {{ $labels.instance }})
HostOomKillDetected (0 active)
alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    OOM kill detected
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host OOM kill detected (instance {{ $labels.instance }})
HostOutOfMemory (0 active)
alert: HostOutOfMemory
expr: (node_memory_MemAvailable_bytes
  / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Node memory is filling up (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of memory (instance {{ $labels.instance }})
HostPhysicalComponentTooHot (0 active)
alert: HostPhysicalComponentTooHot
expr: ((node_hwmon_temp_celsius
  * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"}
  > 75)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Physical hardware component too hot
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host physical component too hot (instance {{ $labels.instance }})
HostRaidArrayGotInactive (0 active)
alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"}
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host RAID array got inactive (instance {{ $labels.instance }})
HostRaidDiskFailure (0 active)
alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"}
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host RAID disk failure (instance {{ $labels.instance }})
HostSwapIsFillingUp (0 active)
alert: HostSwapIsFillingUp
expr: ((1
  - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on
  (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Swap is filling up (>80%)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host swap is filling up (instance {{ $labels.instance }})
HostSystemdServiceCrashed (0 active)
alert: HostSystemdServiceCrashed
expr: (node_systemd_unit_state{state="failed"}
  == 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    systemd service crashed
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host systemd service crashed (instance {{ $labels.instance }})
HostUnusualDiskReadLatency (0 active)
alert: HostUnusualDiskReadLatency
expr: (rate(node_disk_read_time_seconds_total[1m])
  / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (read operations > 100ms)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk read latency (instance {{ $labels.instance }})
HostUnusualDiskWriteLatency (0 active)
alert: HostUnusualDiskWriteLatency
expr: (rate(node_disk_write_time_seconds_total[1m])
  / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (write operations > 100ms)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk write latency (instance {{ $labels.instance }})
/etc/prometheus/alerts/smartctl-exporter.yml > SmartctlExporter
SmartCriticalWarning (0 active)
alert: SmartCriticalWarning
expr: smartctl_device_critical_warning
  > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    device has critical warning (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart critical warning (instance {{ $labels.instance }})
SmartDeviceTemperatureCritical (0 active)
alert: SmartDeviceTemperatureCritical
expr: smartctl_device_temperature
  > 80
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    Device temperature critical (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart device temperature critical (instance {{ $labels.instance }})
SmartDeviceTemperatureWarning (0 active)
alert: SmartDeviceTemperatureWarning
expr: smartctl_device_temperature
  > 60
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Device temperature warning (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart device temperature warning (instance {{ $labels.instance }})
SmartMediaErrors (0 active)
alert: SmartMediaErrors
expr: smartctl_device_media_errors
  > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    device has media errors (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart media errors (instance {{ $labels.instance }})
SmartNvmeWearoutIndicator (0 active)
alert: SmartNvmeWearoutIndicator
expr: smartctl_device_available_spare{device=~"nvme.*"}
  < smartctl_device_available_spare_threshold{device=~"nvme.*"}
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    NVMe device is wearing out (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }})