Rules

Selfmonitoring

2.343s ago

290.8us

Rule State Error Last Evaluation Evaluation Time
alert: SelfMonitoringAlwaysFiring expr: minute() >= 0 for: 1s labels: application: leonard_healthchecks severity: info ok 2.343s ago 275.4us

lowpref

8.521s ago

226.5us

Rule State Error Last Evaluation Evaluation Time
alert: LowGatewayPreference expr: gw_loadbalancing_pref{segment="1"} < 10 for: 1d labels: severity: page annotations: summary: | {{ .Labels.gateway }} has low gateway preference ({{ .Value }}) ok 8.521s ago 210.3us

BlackboxExporter

2.643s ago

1.32ms

Rule State Error Last Evaluation Evaluation Time
alert: BlackboxProbeFailed expr: probe_success == 0 for: 15m labels: severity: critical annotations: description: |- Probe failed VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe failed (instance {{ $labels.instance }}) ok 2.643s ago 310.5us
alert: BlackboxConfigurationReloadFailure expr: blackbox_exporter_config_last_reload_successful != 1 labels: severity: warning annotations: description: |- Blackbox configuration reload failure VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox configuration reload failure (instance {{ $labels.instance }}) ok 2.643s ago 43.06us
alert: BlackboxSslCertificateWillExpireSoon expr: 3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20 labels: severity: warning annotations: description: |- SSL certificate expires in less than 20 days VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }}) ok 2.643s ago 352.6us
alert: BlackboxSslCertificateWillExpireSoon expr: 0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3 labels: severity: critical annotations: description: |- SSL certificate expires in less than 3 days VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }}) ok 2.642s ago 180.3us
alert: BlackboxSslCertificateExpired expr: round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0 labels: severity: critical annotations: description: |- SSL certificate has expired already VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate expired (instance {{ $labels.instance }}) ok 2.642s ago 195.2us
alert: BlackboxProbeSlowHttp expr: avg_over_time(probe_http_duration_seconds[1m]) > 1 for: 1m labels: severity: warning annotations: description: |- HTTP request took more than 1s VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe slow HTTP (instance {{ $labels.instance }}) ok 2.642s ago 157.5us
alert: BlackboxProbeSlowPing expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1 for: 1m labels: severity: warning annotations: description: |- Blackbox ping took more than 1s VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe slow ping (instance {{ $labels.instance }}) ok 2.642s ago 55.29us

general

808ms ago

848.9us

Rule State Error Last Evaluation Evaluation Time
alert: UP_FAILED expr: up{ignore_down!="1"} < 1 for: 15m labels: application: prometheus severity: warning annotations: summary: Scrapes not functional ok 808ms ago 602.1us
alert: PROMETHEUS_RELOAD_FAILED expr: prometheus_config_last_reload_successful < 1 for: 1m labels: application: prometheus severity: warning annotations: summary: Reload of prometheus config failed ok 807ms ago 43.15us
alert: ALERTMANAGER_RELOAD_FAILED expr: alertmanager_config_last_reload_successful < 1 for: 1m labels: application: prometheus severity: warning annotations: summary: Reload of alertmanager config failed ok 807ms ago 29.84us
alert: PROBE_FAILED_TCP expr: probe_success < 1 for: 15m labels: severity: warning annotations: summary: Blackbox probe failed ok 807ms ago 107.9us
alert: AlertmanagerClusterPeers expr: alertmanager_cluster_members < 2 for: 15m labels: severity: warning annotations: summary: Alertmanager cluster has too few members ok 807ms ago 28.48us

NodeExporter

2.062s ago

97.13ms

Rule State Error Last Evaluation Evaluation Time
alert: OsVersionUnknown expr: up{job="node"} unless on (instance) node_os_version{job="node"} for: 1s labels: severity: audit annotations: description: Os-Version could not be determined for {{ $labels.instance }}, this is expected behaviour for Debian < 12, so it is at least Debian unstable summary: Os-Version could not be determined for {{ $labels.instance }} ok 2.062s ago 794.6us
alert: HostOutOfMemory expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Node memory is filling up (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of memory (instance {{ $labels.instance }}) ok 2.061s ago 979.3us
alert: HostMemoryUnderMemoryPressure expr: (rate(node_vmstat_pgmajfault[1m]) > 2000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 30m labels: severity: warning annotations: description: |- The node is under heavy memory pressure. High rate of major page faults VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host memory under memory pressure (instance {{ $labels.instance }}) ok 2.06s ago 295.4us
alert: HostOutOfDiskSpace expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk is almost full (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of disk space (instance {{ $labels.instance }}) ok 2.06s ago 2.758ms
alert: HostDiskWillFillIn24Hours expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Filesystem is predicted to run out of space within the next 24 hours at current write rate VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host disk will fill in 24 hours (instance {{ $labels.instance }}) ok 2.057s ago 6.063ms
alert: HostOutOfInodes expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk is almost running out of available inodes (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of inodes (instance {{ $labels.instance }}) ok 2.051s ago 2.514ms
alert: HostFilesystemDeviceError expr: node_filesystem_device_error == 1 for: 2m labels: severity: critical annotations: description: |- {{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host filesystem device error (instance {{ $labels.instance }}) ok 2.049s ago 597.2us
alert: HostInodesWillFillIn24Hours expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Filesystem is predicted to run out of inodes within the next 24 hours at current write rate VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }}) ok 2.048s ago 6.394ms
alert: HostUnusualDiskReadLatency expr: (rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk latency is growing (read operations > 100ms) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk read latency (instance {{ $labels.instance }}) ok 2.042s ago 885.4us
alert: HostUnusualDiskWriteLatency expr: (rate(node_disk_write_time_seconds_total{nodename!="gw05n02"}[1m]) / rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"} for: 5m labels: severity: warning annotations: description: |- Disk latency is growing (write operations > 100ms) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk write latency (instance {{ $labels.instance }}) ok 2.041s ago 855.4us
alert: HostCpuStealNoisyNeighbor expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- CPU steal is > 10%. A noisy neighbor is killing VM performance or a spot instance may be out of credit. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }}) ok 2.041s ago 7.663ms
alert: HostUnusualDiskIo expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"} for: 15m labels: severity: warning annotations: description: |- Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk IO (instance {{ $labels.instance }}) ok 2.033s ago 382.5us
alert: HostSwapIsFillingUp expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Swap is filling up (>80%) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host swap is filling up (instance {{ $labels.instance }}) ok 2.033s ago 734.1us
alert: HostSystemdServiceCrashed expr: (node_systemd_unit_state{state="failed"} == 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- systemd service crashed VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host systemd service crashed (instance {{ $labels.instance }}) ok 2.032s ago 26.73ms
alert: CpuTooHot expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip=~"pci0000:00_0000:00:18_3",label!="tctl"} > 98)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Physical hardware component too hot VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host physical component too hot (instance {{ $labels.instance }}) ok 2.005s ago 515.9us
alert: HostPhysicalComponentTooHot expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip!="pci0000:00_0000:00:18_3",label!="tctl"} > 75)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Physical hardware component too hot VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host physical component too hot (instance {{ $labels.instance }}) ok 2.005s ago 414.5us
alert: HostNodeOvertemperatureAlarm expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: critical annotations: description: |- Physical node temperature alarm triggered VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host node overtemperature alarm (instance {{ $labels.instance }}) ok 2.004s ago 256.6us
alert: HostRaidArrayGotInactive expr: (node_md_state{state="inactive"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: critical annotations: description: |- RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host RAID array got inactive (instance {{ $labels.instance }}) ok 2.004s ago 201us
alert: HostRaidDiskFailure expr: (node_md_disks{state="failed"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host RAID disk failure (instance {{ $labels.instance }}) ok 2.004s ago 187.1us
alert: HostKernelVersionDeviations expr: (count(sum by (kernel) (label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*"))) > 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 6h labels: severity: warning annotations: description: |- Different kernel versions are running VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host kernel version deviations (instance {{ $labels.instance }}) ok 2.004s ago 500.2us
alert: HostOomKillDetected expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: warning annotations: description: |- OOM kill detected VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host OOM kill detected (instance {{ $labels.instance }}) ok 2.004s ago 280.7us
alert: HostEdacCorrectableErrorsDetected expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: info annotations: description: |- Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last minute. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }}) ok 2.003s ago 432.4us
alert: HostEdacUncorrectableErrorsDetected expr: (node_edac_uncorrectable_errors_total > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: warning annotations: description: |- Host {{ $labels.instance }} has {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }}) ok 2.003s ago 381.9us
alert: HostNetworkReceiveErrors expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Receive Errors (instance {{ $labels.instance }}) ok 2.003s ago 11.39ms
alert: HostNetworkTransmitErrors expr: (rate(node_network_transmit_errs_total{device!~"^g09n03abbtesta|^g09n03amobrtra|^g09n03bbbtestb"}[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Transmit Errors (instance {{ $labels.instance }}) ok 1.991s ago 11.01ms
alert: HostNetworkInterfaceSaturated expr: ((rate(node_network_receive_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m])) / node_network_speed_bytes{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"} > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 15m labels: severity: warning annotations: description: |- The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Interface Saturated (instance {{ $labels.instance }}) ok 1.98s ago 9.631ms
alert: HostNetworkBondDegraded expr: ((node_bonding_active - node_bonding_slaves) != 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}". VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Bond Degraded (instance {{ $labels.instance }}) ok 1.971s ago 307.8us
alert: HostConntrackLimit expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- The number of conntrack is approaching limit VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host conntrack limit (instance {{ $labels.instance }}) ok 1.971s ago 797us
alert: HostClockSkew expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host clock skew (instance {{ $labels.instance }}) ok 1.97s ago 1.205ms
alert: HostClockNotSynchronising expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Clock not synchronising. Ensure NTP is configured on this host. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host clock not synchronising (instance {{ $labels.instance }}) ok 1.969s ago 695.5us
alert: HostRequiresReboot expr: (node_reboot_required > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 4h labels: severity: info annotations: description: |- {{ $labels.instance }} requires a reboot. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host requires reboot (instance {{ $labels.instance }}) ok 1.968s ago 1.224ms

postfix_smtp_status_deferred

3.576s ago

237.2us

Rule State Error Last Evaluation Evaluation Time
alert: Mail stuck in queue expr: postfix_showq_message_age_seconds_count > 0 for: 1h10m labels: application: mail severity: warning annotations: summary: Mail server {{ $labels.instance }} has mails stuck in its queue ok 3.576s ago 206.1us

pve-guest-alerts

14.181s ago

1.617ms

Rule State Error Last Evaluation Evaluation Time
alert: GuestRunningWithoutOnboot expr: (pve_up{id=~".+"} == 1) * on (id) group_left () (pve_onboot_status{id=~".+"} or on (id) vector(0)) != 1 for: 10m labels: severity: info annotations: description: Guest {{ $labels.id }} läuft, hat aber kein onboot gesetzt. Prüfen, ob gewünscht. summary: Guest {{ $labels.id }} läuft ohne onboot ok 14.181s ago 1.057ms
alert: GuestNotRunningButOnboot expr: (pve_up{id=~".+"} == 0) * on (id) group_left () (pve_onboot_status{id=~".+"} == 1) for: 10m labels: severity: critical annotations: description: Guest {{ $labels.id }} hat onboot=1 gesetzt, aber läuft nicht. Sollte automatisch starten. summary: Guest {{ $labels.id }} läuft nicht, aber onboot=1 ok 14.18s ago 527.5us

SmartctlExporter

13.785s ago

397.9us

Rule State Error Last Evaluation Evaluation Time
alert: SmartDeviceTemperatureWarning expr: smartctl_device_temperature > 60 for: 2m labels: severity: warning annotations: description: |- Device temperature warning (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart device temperature warning (instance {{ $labels.instance }}) ok 13.785s ago 139.8us
alert: SmartDeviceTemperatureCritical expr: smartctl_device_temperature > 80 for: 2m labels: severity: critical annotations: description: |- Device temperature critical (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart device temperature critical (instance {{ $labels.instance }}) ok 13.785s ago 30.65us
alert: SmartCriticalWarning expr: smartctl_device_critical_warning > 0 for: 15m labels: severity: critical annotations: description: |- device has critical warning (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart critical warning (instance {{ $labels.instance }}) ok 13.785s ago 26.59us
alert: SmartMediaErrors expr: smartctl_device_media_errors > 0 for: 15m labels: severity: critical annotations: description: |- device has media errors (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart media errors (instance {{ $labels.instance }}) ok 13.785s ago 21.78us
alert: SmartNvmeWearoutIndicator expr: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"} for: 15m labels: severity: critical annotations: description: |- NVMe device is wearing out (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }}) ok 13.785s ago 160.7us