Rules

The alerting rule groups below are listed as this Prometheus server reports them: each group shows when it was last evaluated and how long the evaluation took, followed by its rules with their state, last evaluation, and evaluation time. All rules were in state ok and none reported an evaluation error.

Selfmonitoring
Last evaluation: 10.051s ago | Evaluation time: 308.1us

alert: SelfMonitoringAlwaysFiring
expr: minute() >= 0
for: 1s
labels:
  application: leonard_healthchecks
  severity: info
State: ok | Last evaluation: 10.051s ago | Evaluation time: 295.1us
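
The expression minute() >= 0 is always true, so this rule is meant to fire permanently; such an always-firing alert is typically used as a watchdog whose absence indicates a broken alerting pipeline. As a sketch, the group above would be declared in a rules file roughly like this (the file path and the groups/rules nesting are standard Prometheus rule-file structure, not taken from this page):

  # /etc/prometheus/rules/selfmonitoring.yml (hypothetical path)
  groups:
    - name: Selfmonitoring
      rules:
        - alert: SelfMonitoringAlwaysFiring
          expr: minute() >= 0   # minute() returns 0-59, so this is always true
          for: 1s
          labels:
            application: leonard_healthchecks
            severity: info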

lowpref
Last evaluation: 11.964s ago | Evaluation time: 545.8us

alert: LowGatewayPreference
expr: gw_loadbalancing_pref{segment="1"} < 10
for: 1d
labels:
  severity: page
annotations:
  summary: |
    {{ .Labels.gateway }} has low gateway preference ({{ .Value }})
State: ok | Last evaluation: 11.964s ago | Evaluation time: 531.7us
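
With for: 1d the preference has to stay below 10 for a full day before the page fires. To see where gateways currently stand before that window elapses, the same selector can be inspected ad hoc (this query is illustrative, not one of the configured rules):

  # current preference per gateway in segment 1, lowest values first
  sort(gw_loadbalancing_pref{segment="1"})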

BlackboxExporter
Last evaluation: 13.881s ago | Evaluation time: 1.244ms

alert: BlackboxProbeFailed
expr: probe_success{job!~"node_pve01|blackbox_tls_pve01"} == 0
for: 10m
labels:
  severity: critical
annotations:
  description: |-
    Probe failed
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox probe failed (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 341us

alert: BlackboxConfigurationReloadFailure
expr: blackbox_exporter_config_last_reload_successful != 1
labels:
  severity: warning
annotations:
  description: |-
    Blackbox configuration reload failure
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 226.4us

alert: BlackboxSslCertificateWillExpireSoon
expr: 3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20
labels:
  severity: warning
annotations:
  description: |-
    SSL certificate expires in less than 20 days
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 274.1us

alert: BlackboxSslCertificateWillExpireSoon
expr: 0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate expires in less than 3 days
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 96.58us

alert: BlackboxSslCertificateExpired
expr: round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0
labels:
  severity: critical
annotations:
  description: |-
    SSL certificate has expired already
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 83.12us
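
The three certificate rules share one building block: the number of days until the earliest certificate expiry, computed from probe_ssl_earliest_cert_expiry (the expiry time as a Unix timestamp). A worked example with made-up timestamps:

  # suppose last_over_time(probe_ssl_earliest_cert_expiry[10m]) = 1700000000
  # and time() = 1699568000:
  #   (1700000000 - 1699568000) / 86400 = 432000 / 86400 = 5 days
  #   round(5, 0.1) = 5.0  ->  caught by "3 <= ... < 20" (warning),
  #   not by "0 <= ... < 3" (critical) or "< 0" (expired)
  round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1)
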
alert: BlackboxProbeSlowHttp
expr: avg_over_time(probe_http_duration_seconds[1m]) > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    HTTP request took more than 1s
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 51.04us

alert: BlackboxProbeSlowPing
expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1
for: 1m
labels:
  severity: warning
annotations:
  description: |-
    Blackbox ping took more than 1s
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Blackbox probe slow ping (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.881s ago | Evaluation time: 147.2us
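
The probe_* series evaluated here come from blackbox_exporter scrape jobs. The scrape configuration itself is not shown on this page; a typical job looks roughly like the sketch below, where the module name, probe target, and exporter address are placeholders rather than values from this setup:

  - job_name: blackbox_http
    metrics_path: /probe
    params:
      module: [http_2xx]            # module defined in blackbox_exporter's blackbox.yml
    static_configs:
      - targets:
          - https://example.org     # endpoint to probe (placeholder)
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance      # keep the probed URL as the instance label
      - target_label: __address__
        replacement: blackbox-exporter:9115   # where blackbox_exporter listens (placeholder)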

probe_success
Last evaluation: 7.955s ago | Evaluation time: 277.5us

alert: PROBE_FAILED_TCP
expr: probe_success < 1
for: 5m
labels:
  severity: warning
annotations:
  summary: Blackbox probe failed
State: ok | Last evaluation: 7.955s ago | Evaluation time: 254.6us
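
The rule name suggests these probes are TCP connect checks. The corresponding blackbox_exporter module definition is not part of this page; a plain TCP module would be declared in blackbox.yml roughly like this (module name and timeout are illustrative):

  modules:
    tcp_connect:
      prober: tcp
      timeout: 5s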

reload_success
Last evaluation: 13.066s ago | Evaluation time: 456.7us

alert: PROMETHEUS_RELOAD_FAILED
expr: prometheus_config_last_reload_successful < 1
for: 1m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Reload of prometheus config failed
State: ok | Last evaluation: 13.066s ago | Evaluation time: 256.6us

alert: ALERTMANAGER_RELOAD_FAILED
expr: alertmanager_config_last_reload_successful < 1
for: 1m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Reload of alertmanager config failed
State: ok | Last evaluation: 13.066s ago | Evaluation time: 183.6us
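
Both metrics are 1 after a successful reload and 0 after a failed one, so these alerts usually mean a configuration with a syntax error was shipped. A common way to avoid that is to validate before reloading; a sketch assuming default paths and an enabled lifecycle API (both are assumptions, not confirmed by this page):

  # validate the main config and the rule files it references
  promtool check config /etc/prometheus/prometheus.yml

  # then ask Prometheus to reload (requires --web.enable-lifecycle);
  # sending SIGHUP to the process works as well
  curl -X POST http://localhost:9090/-/reload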

up_success
Last evaluation: 11.081s ago | Evaluation time: 465.4us

alert: UP_FAILED
expr: up{ignore_down!="1"} < 1
for: 15m
labels:
  application: prometheus
  severity: warning
annotations:
  summary: Scrapes not functional
State: ok | Last evaluation: 11.081s ago | Evaluation time: 453.1us
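
The matcher ignore_down!="1" lets individual targets opt out of this alert by carrying an ignore_down="1" label. How that label is attached is not visible here; with static targets it could be set per target group, for example (job name and address are placeholders):

  - job_name: flaky_lab_device
    static_configs:
      - targets: ['10.0.0.5:9100']
        labels:
          ignore_down: "1"          # exempt this target from UP_FAILED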

NodeExporter
Last evaluation: 13.302s ago | Evaluation time: 136.2ms
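
Nearly every expression in this group ends with * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}. Since node_uname_info is an info-style metric with the constant value 1, the multiplication leaves the alert value unchanged and only serves to copy the nodename label onto the result. The pattern in isolation, with some_metric as a placeholder:

  # attach nodename from node_uname_info to any per-instance result
  (some_metric > 10)
    * on (instance) group_left (nodename)
      node_uname_info{nodename=~".+"}
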
alert: HostOutOfMemory
expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Node memory is filling up (< 10% left)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host out of memory (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.302s ago | Evaluation time: 1.001ms

alert: HostMemoryUnderMemoryPressure
expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 20m
labels:
  severity: warning
annotations:
  description: |-
    The node is under heavy memory pressure. High rate of major page faults
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host memory under memory pressure (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.301s ago | Evaluation time: 435.7us

alert: HostOutOfDiskSpace
expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost full (< 10% left)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host out of disk space (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.301s ago | Evaluation time: 4.902ms

alert: HostDiskWillFillIn24Hours
expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of space within the next 24 hours at current write rate
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.296s ago | Evaluation time: 7.453ms

alert: HostOutOfInodes
expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk is almost running out of available inodes (< 10% left)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host out of inodes (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.288s ago | Evaluation time: 4.073ms

alert: HostFilesystemDeviceError
expr: node_filesystem_device_error == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host filesystem device error (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.284s ago | Evaluation time: 1.041ms

alert: HostInodesWillFillIn24Hours
expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Filesystem is predicted to run out of inodes within the next 24 hours at current write rate
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.283s ago | Evaluation time: 7.072ms
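
The two 24-hour rules rely on predict_linear, which fits a linear trend to the range vector and extrapolates it; the alert condition is that the extrapolated value goes negative. The building block on its own (ad-hoc query, not a configured rule):

  # projected free bytes 24 hours from now, based on the last hour's trend;
  # negative values mean the filesystem is expected to fill up within a day
  predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600)
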
alert: HostUnusualDiskReadLatency
expr: (rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (read operations > 100ms)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host unusual disk read latency (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.276s ago | Evaluation time: 4.003ms

alert: HostUnusualDiskWriteLatency
expr: (rate(node_disk_write_time_seconds_total{nodename!="gw05n02"}[1m]) / rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Disk latency is growing (write operations > 100ms)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host unusual disk write latency (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.272s ago | Evaluation time: 4.052ms

alert: HostCpuStealNoisyNeighbor
expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.268s ago | Evaluation time: 4.167ms

alert: HostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"}
for: 15m
labels:
  severity: warning
annotations:
  description: |-
    Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host unusual disk IO (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.264s ago | Evaluation time: 1.39ms
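
The read and write latency rules divide the per-second increase of time spent on I/O by the per-second count of completed operations, which gives the average duration per operation over the window; the 0.1 threshold therefore corresponds to 100 ms per request. For reads, spelled out (ad-hoc query, not a configured rule):

  # average read latency in seconds over the last minute, per device
  rate(node_disk_read_time_seconds_total[1m])
    / rate(node_disk_reads_completed_total[1m])
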
alert: HostSwapIsFillingUp
expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Swap is filling up (>80%)
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host swap is filling up (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.263s ago | Evaluation time: 618.6us

alert: HostSystemdServiceCrashed
expr: (node_systemd_unit_state{state="failed"} == 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    systemd service crashed
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host systemd service crashed (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.262s ago | Evaluation time: 24.95ms

alert: CpuTooHot
expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip=~"pci0000:00_0000:00:18_3",label!="tctl"} > 98)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Physical hardware component too hot
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host physical component too hot (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.238s ago | Evaluation time: 1.812ms

alert: HostPhysicalComponentTooHot
expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip!="pci0000:00_0000:00:18_3",label!="tctl"} > 75)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    Physical hardware component too hot
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host physical component too hot (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.236s ago | Evaluation time: 1.9ms

alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    Physical node temperature alarm triggered
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host node overtemperature alarm (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.234s ago | Evaluation time: 666.8us
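
The two temperature rules use node_hwmon_sensor_label (an info metric with value 1) as a filter: multiplying by it keeps only the sensors whose chip and label match the selector. The chip pci0000:00_0000:00:18_3 matched by CpuTooHot gets its own threshold of 98, while HostPhysicalComponentTooHot covers the remaining chips at 75. The filtering subexpression can be run on its own to see the temperatures the rules actually compare (shown here with the CpuTooHot selector):

  node_hwmon_temp_celsius
    * ignoring (label) group_left (instance, job, node, sensor)
      node_hwmon_sensor_label{chip=~"pci0000:00_0000:00:18_3",label!="tctl"}
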
alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: critical
annotations:
  description: |-
    RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host RAID array got inactive (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.233s ago | Evaluation time: 276.9us

alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host RAID disk failure (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.233s ago | Evaluation time: 321.7us

alert: HostKernelVersionDeviations
expr: (count(sum by (kernel) (label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*"))) > 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 6h
labels:
  severity: warning
annotations:
  description: |-
    Different kernel versions are running
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host kernel version deviations (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.233s ago | Evaluation time: 489.3us

alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    OOM kill detected
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host OOM kill detected (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.232s ago | Evaluation time: 307.2us

alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: info
annotations:
  description: |-
    Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last 5 minutes.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.232s ago | Evaluation time: 335us

alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.232s ago | Evaluation time: 372.6us

alert: HostNetworkReceiveErrors
expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host Network Receive Errors (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.232s ago | Evaluation time: 24.2ms

alert: HostNetworkTransmitErrors
expr: (rate(node_network_transmit_errs_total{device!~"^g09n03abbtesta|^g09n03amobrtra|^g09n03bbbtestb"}[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host Network Transmit Errors (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.208s ago | Evaluation time: 24.83ms
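
The two network error rules alert on a ratio rather than a count: errors per second divided by packets per second over two minutes, with the 0.01 threshold meaning more than 1% of packets had errors (note that $value in the description therefore renders this ratio, not an absolute error count). A quick worked example:

  # rate(node_network_receive_errs_total[2m])    = 5 errors/s
  # rate(node_network_receive_packets_total[2m]) = 200 packets/s
  #   5 / 200 = 0.025  ->  2.5% of packets errored, above 0.01, so the alert fires
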
alert: HostNetworkInterfaceSaturated
expr: ((rate(node_network_receive_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m])) / node_network_speed_bytes{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"} > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host Network Interface Saturated (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.183s ago | Evaluation time: 12.53ms

alert: HostNetworkBondDegraded
expr: ((node_bonding_active - node_bonding_slaves) != 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host Network Bond Degraded (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.17s ago | Evaluation time: 274.1us

alert: HostConntrackLimit
expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
  severity: warning
annotations:
  description: |-
    The number of conntrack is approaching limit
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host conntrack limit (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.17s ago | Evaluation time: 752.8us

alert: HostClockSkew
expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
  severity: warning
annotations:
  description: |-
    Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host clock skew (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.169s ago | Evaluation time: 999.3us

alert: HostClockNotSynchronising
expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Clock not synchronising. Ensure NTP is configured on this host.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host clock not synchronising (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.168s ago | Evaluation time: 563.4us

alert: HostRequiresReboot
expr: (node_reboot_required > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 4h
labels:
  severity: info
annotations:
  description: |-
    {{ $labels.instance }} requires a reboot.
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Host requires reboot (instance {{ $labels.instance }})
State: ok | Last evaluation: 13.168s ago | Evaluation time: 336us

SmartctlExporter
Last evaluation: 10.024s ago | Evaluation time: 678.1us

alert: SmartDeviceTemperatureWarning
expr: smartctl_device_temperature > 60
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Device temperature warning (instance {{ $labels.instance }})
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Smart device temperature warning (instance {{ $labels.instance }})
State: ok | Last evaluation: 10.024s ago | Evaluation time: 167.2us

alert: SmartDeviceTemperatureCritical
expr: smartctl_device_temperature > 80
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    Device temperature critical (instance {{ $labels.instance }})
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Smart device temperature critical (instance {{ $labels.instance }})
State: ok | Last evaluation: 10.024s ago | Evaluation time: 170.5us

alert: SmartCriticalWarning
expr: smartctl_device_critical_warning > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    device has critical warning (instance {{ $labels.instance }})
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Smart critical warning (instance {{ $labels.instance }})
State: ok | Last evaluation: 10.024s ago | Evaluation time: 112.6us

alert: SmartMediaErrors
expr: smartctl_device_media_errors > 0
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    device has media errors (instance {{ $labels.instance }})
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Smart media errors (instance {{ $labels.instance }})
State: ok | Last evaluation: 10.024s ago | Evaluation time: 38.4us

alert: SmartNvmeWearoutIndicator
expr: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"}
for: 15m
labels:
  severity: critical
annotations:
  description: |-
    NVMe device is wearing out (instance {{ $labels.instance }})
    VALUE = {{ $value }}
    LABELS = {{ $labels }}
  summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }})
State: ok | Last evaluation: 10.024s ago | Evaluation time: 173.8us
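
SmartNvmeWearoutIndicator fires once the available spare capacity of an NVMe device drops below the threshold the drive itself reports. The remaining margin can be watched directly with an ad-hoc query (this assumes both metrics carry identical label sets, which is how smartctl_exporter normally exposes them):

  # margin between available spare and the drive's own threshold, per NVMe device
  smartctl_device_available_spare{device=~"nvme.*"}
    - smartctl_device_available_spare_threshold{device=~"nvme.*"}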