Alerts


/etc/prometheus/alert_healthchecks.yml > Selfmonitoring
SelfMonitoringAlwaysFiring (1 active)
alert: SelfMonitoringAlwaysFiring
expr: minute() >= 0
for: 1s
labels:
  application: leonard_healthchecks
  severity: info
Labels:       alertname="SelfMonitoringAlwaysFiring" application="leonard_healthchecks" severity="info"
State:        firing
Active Since: 2025-05-27 17:27:35.330119324 +0000 UTC
Value:        2
/etc/prometheus/alert_loadbalancing.yml > lowpref
LowGatewayPreference (1 active)
alert: LowGatewayPreference
expr: gw_loadbalancing_pref{segment="1"} < 10
for: 1d
labels:
  severity: page
annotations:
  summary: |
    {{ .Labels.gateway }} has low gateway preference ({{ .Value }})
Labels:       alertname="LowGatewayPreference" instance="gw01n03" job="json_gwpref" segment="1" severity="page"
State:        firing
Active Since: 2025-05-27 17:29:33.417307786 +0000 UTC
Value:        -9
/etc/prometheus/alerts/blackbox-exporter.yml > BlackboxExporter
BlackboxConfigurationReloadFailure (0 active)
alert: BlackboxConfigurationReloadFailure
expr: blackbox_exporter_config_last_reload_successful != 1
labels:
  severity: warning
annotations:
  description: |-
    Blackbox configuration reload failure
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
BlackboxProbeFailed (0 active)
alert: BlackboxProbeFailed
expr: probe_success{job!~"node_pve01|blackbox_tls_pve01"} == 0
for: 10m
labels:
  severity: critical
annotations:
  description: |-
    Probe failed
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe failed (instance {{ $labels.instance }})
BlackboxProbeSlowHttp (0 active)
alert: BlackboxProbeSlowHttp
expr: avg_over_time(probe_http_duration_seconds[1m]) > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    HTTP request took more than 1s
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
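
If BlackboxProbeSlowHttp fires, it helps to see where the time is spent: probe_http_duration_seconds carries a phase label (resolve, connect, tls, processing, transfer). A sketch of an ad-hoc query for the expression browser:

# Sketch: average HTTP probe time over the last minute, broken down by phase.
avg by (instance, phase) (avg_over_time(probe_http_duration_seconds[1m]))
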
BlackboxProbeSlowPing (0 active)
alert: BlackboxProbeSlowPing
expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    Blackbox ping took more than 1s
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox probe slow ping (instance {{ $labels.instance }})
BlackboxSslCertificateExpired (0 active)
alert: BlackboxSslCertificateExpired
expr: round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate has expired already
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
BlackboxSslCertificateWillExpireSoon (0 active)
alert: BlackboxSslCertificateWillExpireSoon
expr: 3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20
labels:
  severity: warning
annotations:
  description: |-
    SSL certificate expires in less than 20 days
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
BlackboxSslCertificateWillExpireSoon (0 active)
alert: BlackboxSslCertificateWillExpireSoon
expr: 0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate expires in less than 3 days
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
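
The two WillExpireSoon rules split the remaining certificate lifetime into a warning band (3 to 20 days) and a critical band (under 3 days). A worked example with illustrative numbers:

# Illustrative only, the values are made up:
#   probe_ssl_earliest_cert_expiry - time() = 432000 s  ->  432000 / 86400 = 5.0 days
#     3 <= 5.0 < 20  ->  the warning rule fires
#   probe_ssl_earliest_cert_expiry - time() = 172800 s  ->  172800 / 86400 = 2.0 days
#     0 <= 2.0 < 3   ->  the critical rule fires
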
/etc/prometheus/alerts/general.yml > probe_success
PROBE_FAILED_TCP (0 active)
alert: PROBE_FAILED_TCP
expr: probe_success < 1
for: 5m
labels:
  severity: warning
annotations:
  summary: Blackbox probe failed
/etc/prometheus/alerts/general.yml > reload_success
ALERTMANAGER_RELOAD_FAILED (0 active)
alert: ALERTMANAGER_RELOAD_FAILED
expr: alertmanager_config_last_reload_successful < 1
for: 1m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Reload of alertmanager config failed
PROMETHEUS_RELOAD_FAILED (0 active)
alert: PROMETHEUS_RELOAD_FAILED
expr: prometheus_config_last_reload_successful < 1
for: 1m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Reload of prometheus config failed
/etc/prometheus/alerts/general.yml > up_success
UP_FAILED (0 active)
alert: UP_FAILED
expr: up{ignore_down!="1"} < 1
for: 15m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Scrapes not functional
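
UP_FAILED skips targets that carry ignore_down="1". That label is presumably attached at scrape time; a minimal sketch, assuming it is set per target in prometheus.yml (job name and target are hypothetical):

scrape_configs:
  - job_name: lab_devices              # hypothetical job name
    static_configs:
      - targets: ['10.0.0.42:9100']    # hypothetical target
        labels:
          ignore_down: "1"             # excluded by up{ignore_down!="1"} above
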
/etc/prometheus/alerts/node-exporter.yml > NodeExporter
CpuTooHot (0 active)
alert: CpuTooHot
expr: ((node_hwmon_temp_celsius
  * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip=~"pci0000:00_0000:00:18_3",label!="tctl"}
  > 98)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Physical hardware component too hot
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host physical component too hot (instance {{ $labels.instance }})
HostClockNotSynchronising (0 active)
alert: HostClockNotSynchronising
expr: (min_over_time(node_timex_sync_status[1m])
  == 0 and node_timex_maxerror_seconds >= 16) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Clock not synchronising. Ensure NTP is configured on this host.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host clock not synchronising (instance {{ $labels.instance }})
HostClockSkew (0 active)
alert: HostClockSkew
expr: ((node_timex_offset_seconds
  > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds
  < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host clock skew (instance {{ $labels.instance }})
HostConntrackLimit (0 active)
alert: HostConntrackLimit
expr: (node_nf_conntrack_entries
  / node_nf_conntrack_entries_limit > 0.8) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    The number of conntrack entries is approaching the limit
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host conntrack limit (instance {{ $labels.instance }})
HostCpuStealNoisyNeighbor (0 active)
alert: HostCpuStealNoisyNeighbor
expr: (avg
  by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 >
  10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    CPU steal is > 10%. A noisy neighbor is killing VM performance, or a spot instance may be out of credit.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
HostDiskWillFillIn24Hours (0 active)
alert: HostDiskWillFillIn24Hours
expr: ((node_filesystem_avail_bytes
  * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint)
  predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600)
  < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on
  (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of space within the next 24 hours at current write rate
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
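
HostDiskWillFillIn24Hours combines the current fill level with a linear extrapolation: predict_linear() projects the free-bytes series one day ahead from the last hour of samples, and the alert requires that projection to go negative. A sketch of an ad-hoc query to inspect the projection per filesystem, in GiB:

# Sketch: projected free space 24 hours from now, per filesystem, in GiB.
predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) / 2^30
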
HostEdacCorrectableErrorsDetected (0 active)
alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: info
annotations:
  description: |-
    Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last minute.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
HostEdacUncorrectableErrorsDetected (0 active)
alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
HostFilesystemDeviceError (0 active)
alert: HostFilesystemDeviceError
expr: node_filesystem_device_error == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host filesystem device error (instance {{ $labels.instance }})
HostInodesWillFillIn24Hours (0 active)
alert: HostInodesWillFillIn24Hours
expr: (node_filesystem_files_free{fstype!="msdosfs"}
  / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h],
  24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"}
  == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of inodes within the next 24 hours at current write rate
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
HostKernelVersionDeviations (0 active)
alert: HostKernelVersionDeviations
expr: (count(sum
  by (kernel) (label_replace(node_uname_info, "kernel", "$1", "release",
  "([0-9]+.[0-9]+.[0-9]+).*"))) > 1) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 6h
labels:
  severity: warning
annotations:
  description: |-
    Different kernel versions are running
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host kernel version deviations (instance {{ $labels.instance }})
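
HostKernelVersionDeviations only reports that more than one kernel line is present. To see which versions are running and on how many hosts, the inner part of the expression can be reused as an ad-hoc query (a sketch, using the same label_replace pattern as the rule above):

# Sketch: series count per kernel line (node_uname_info has value 1, one series per host).
sum by (kernel) (
  label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")
)
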
HostMemoryUnderMemoryPressure (0 active)
alert: HostMemoryUnderMemoryPressure
expr: (rate(node_vmstat_pgmajfault[1m])
  > 1000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 20m
labels:
  severity: warning
annotations:
  description: |-
    The node is under heavy memory pressure. High rate of major page faults
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host memory under memory pressure (instance {{ $labels.instance }})
HostNetworkBondDegraded (0 active)
alert: HostNetworkBondDegraded
expr: ((node_bonding_active
  - node_bonding_slaves) != 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Bond Degraded (instance {{ $labels.instance }})
HostNetworkInterfaceSaturated (0 active)
alert: HostNetworkInterfaceSaturated
expr: ((rate(node_network_receive_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m])
  + rate(node_network_transmit_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m]))
  / node_network_speed_bytes{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}
  > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Interface Saturated (instance {{ $labels.instance }})
HostNetworkReceiveErrors (0 active)
alert: HostNetworkReceiveErrors
expr: (rate(node_network_receive_errs_total[2m])
  / rate(node_network_receive_packets_total[2m]) > 0.01) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Receive Errors (instance {{ $labels.instance }})
HostNetworkTransmitErrors (0 active)
alert: HostNetworkTransmitErrors
expr: (rate(node_network_transmit_errs_total{device!~"^g09n03abbtesta|^g09n03amobrtra|^g09n03bbbtestb"}[2m])
  / rate(node_network_transmit_packets_total[2m]) > 0.01) * on (instance) group_left
  (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host Network Transmit Errors (instance {{ $labels.instance }})
HostNodeOvertemperatureAlarm (0 active)
alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius
  == 1) or (node_hwmon_temp_alarm == 1)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    Physical node temperature alarm triggered
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host node overtemperature alarm (instance {{ $labels.instance }})
HostOomKillDetected (0 active)
alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    OOM kill detected
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host OOM kill detected (instance {{ $labels.instance }})
HostOutOfDiskSpace (0 active)
alert: HostOutOfDiskSpace
expr: ((node_filesystem_avail_bytes
  * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint)
  node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost full (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of disk space (instance {{ $labels.instance }})
HostOutOfInodes (0 active)
alert: HostOutOfInodes
expr: (node_filesystem_files_free{fstype!="msdosfs"}
  / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and on (instance,
  device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost running out of available inodes (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of inodes (instance {{ $labels.instance }})
HostOutOfMemory (0 active)
alert: HostOutOfMemory
expr: (node_memory_MemAvailable_bytes
  / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename)
  node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Node memory is filling up (< 10% left)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host out of memory (instance {{ $labels.instance }})
HostPhysicalComponentTooHot (0 active)
alert: HostPhysicalComponentTooHot
expr: ((node_hwmon_temp_celsius
  * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip!="pci0000:00_0000:00:18_3",label!="tctl"}
  > 75)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Physical hardware component too hot
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host physical component too hot (instance {{ $labels.instance }})
HostRaidArrayGotInactive (0 active)
alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"}
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host RAID array got inactive (instance {{ $labels.instance }})
HostRaidDiskFailure (0 active)
alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"}
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host RAID disk failure (instance {{ $labels.instance }})
HostRequiresReboot (0 active)
alert: HostRequiresReboot
expr: (node_reboot_required
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 4h
labels:
  severity: info
annotations:
  description: |-
    {{ $labels.instance }} requires a reboot.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host requires reboot (instance {{ $labels.instance }})
HostSwapIsFillingUp (0 active)
alert: HostSwapIsFillingUp
expr: ((1
  - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on
  (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Swap is filling up (>80%)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host swap is filling up (instance {{ $labels.instance }})
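
The swap expression reads as "more than 80% of swap in use". A worked example with illustrative numbers:

# Illustrative only:
#   SwapTotal = 4 GiB, SwapFree = 0.5 GiB
#   (1 - 0.5 / 4) * 100 = 87.5  ->  above 80, the alert fires after 2m
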
HostSystemdServiceCrashed (0 active)
alert: HostSystemdServiceCrashed
expr: (node_systemd_unit_state{state="failed"}
  == 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    systemd service crashed
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host systemd service crashed (instance {{ $labels.instance }})
HostUnusualDiskIo (0 active)
alert: HostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m])
  > 0.5) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"}
for: 15m
labels:
  severity: warning
annotations:
  description: |-
    Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk IO (instance {{ $labels.instance }})
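
node_disk_io_time_seconds_total counts seconds spent doing I/O, so its rate is the fraction of time the device was busy; the rule above fires at 50% sustained for 15 minutes. A sketch of an ad-hoc query showing utilisation as a percentage per device:

# Sketch: device busy time as a percentage over the last 5 minutes.
rate(node_disk_io_time_seconds_total[5m]) * 100
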
HostUnusualDiskReadLatency (0 active)
alert: HostUnusualDiskReadLatency
expr: (rate(node_disk_read_time_seconds_total[1m])
  / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m])
  > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (read operations > 100ms)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk read latency (instance {{ $labels.instance }})
HostUnusualDiskWriteLatency (0 active)
alert: HostUnusualDiskWriteLatency
expr: (rate(node_disk_write_time_seconds_total{nodename!="gw05n02"}[1m])
  / rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0.1
  and rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) >
  0) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (write operations > 100ms)
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host unusual disk write latency (instance {{ $labels.instance }})
/etc/prometheus/alerts/smartctl-exporter.yml > SmartctlExporter
SmartCriticalWarning (0 active)
alert: SmartCriticalWarning
expr: smartctl_device_critical_warning > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    Device has a critical warning (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart critical warning (instance {{ $labels.instance }})
SmartDeviceTemperatureCritical (0 active)
alert: SmartDeviceTemperatureCritical
expr: smartctl_device_temperature > 80
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    Device temperature critical (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart device temperature critical (instance {{ $labels.instance }})
SmartDeviceTemperatureWarning (0 active)
alert: SmartDeviceTemperatureWarning
expr: smartctl_device_temperature > 60
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Device temperature warning (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart device temperature warning (instance {{ $labels.instance }})
SmartMediaErrors (0 active)
alert: SmartMediaErrors
expr: smartctl_device_media_errors > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    Device has media errors (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart media errors (instance {{ $labels.instance }})
SmartNvmeWearoutIndicator (0 active)
alert: SmartNvmeWearoutIndicator
expr: smartctl_device_available_spare{device=~"nvme.*"}
  < smartctl_device_available_spare_threshold{device=~"nvme.*"}
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    NVMe device is wearing out (instance {{ $labels.instance }})
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }})
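
SmartNvmeWearoutIndicator compares the remaining spare capacity with the device's own threshold. A sketch of an ad-hoc query showing the remaining margin per NVMe device:

# Sketch: difference between remaining spare and its threshold, per NVMe device.
smartctl_device_available_spare{device=~"nvme.*"}
  - smartctl_device_available_spare_threshold{device=~"nvme.*"}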