Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/zabbix/zabbix.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaxim Chudinov <maxim.chudinov@zabbix.com>2021-12-20 09:45:15 +0300
committerMaxim Chudinov <maxim.chudinov@zabbix.com>2021-12-20 09:45:15 +0300
commite29847c760282f80af539e24d5c4b5e16b00d916 (patch)
treee43878a79dc0cc12bee6f5fde2573f391e2f3c39 /templates
parentf538f3c882d43924db74f6452d2dbcf35c6ba4c5 (diff)
[DEV-1998] fixed typos and changed README files for App templates
Diffstat (limited to 'templates')
-rw-r--r--templates/app/activemq_jmx/README.md8
-rw-r--r--templates/app/activemq_jmx/template_app_activemq_jmx.yaml2
-rw-r--r--templates/app/apache_agent/README.md18
-rw-r--r--templates/app/apache_agent/template_app_apache_agent.yaml4
-rw-r--r--templates/app/apache_http/README.md16
-rw-r--r--templates/app/apache_http/template_app_apache_http.yaml4
-rw-r--r--templates/app/aranet/README.md118
-rw-r--r--templates/app/aranet/aranet_cloud.yaml27
-rw-r--r--templates/app/ceph_agent2/README.md28
-rw-r--r--templates/app/ceph_agent2/template_app_ceph_agent2.yaml4
-rw-r--r--templates/app/certificate_agent2/README.md6
-rw-r--r--templates/app/certificate_agent2/template_app_certificate_agent2.yaml4
-rw-r--r--templates/app/cloudflare_http/README.md4
-rw-r--r--templates/app/cloudflare_http/template_app_cloudflare_http.yaml60
-rw-r--r--templates/app/docker/README.md76
-rw-r--r--templates/app/docker/template_app_docker.yaml6
-rw-r--r--templates/app/elasticsearch_http/README.md223
-rw-r--r--templates/app/elasticsearch_http/template_app_elasticsearch_http.yaml33
-rw-r--r--templates/app/etcd_http/README.md108
-rw-r--r--templates/app/etcd_http/template_app_etcd_http.yaml50
-rw-r--r--templates/app/exchange/README.md23
-rw-r--r--templates/app/exchange/template_app_exchange.yaml4
-rw-r--r--templates/app/exchange_active/README.md23
-rw-r--r--templates/app/exchange_active/template_app_exchange_active.yaml4
-rw-r--r--templates/app/generic_java_jmx/README.md176
-rw-r--r--templates/app/generic_java_jmx/template_app_generic_java_jmx.yaml6
-rw-r--r--templates/app/gitlab_http/README.md75
-rw-r--r--templates/app/gitlab_http/template_app_gitlab_http.yaml15
-rw-r--r--templates/app/hadoop_http/README.md195
-rw-r--r--templates/app/hadoop_http/template_app_hadoop_http.yaml4
-rw-r--r--templates/app/haproxy_agent/README.md115
-rw-r--r--templates/app/haproxy_agent/template_app_haproxy_agent.yaml60
-rw-r--r--templates/app/haproxy_http/README.md115
-rw-r--r--templates/app/haproxy_http/template_app_haproxy_http.yaml60
-rw-r--r--templates/app/iis_agent/README.md2
-rw-r--r--templates/app/iis_agent/template_app_iis_agent.yaml4
-rw-r--r--templates/app/iis_agent_active/README.md2
-rw-r--r--templates/app/iis_agent_active/template_app_iis_agent_active.yaml4
-rw-r--r--templates/app/jenkins/README.md88
-rw-r--r--templates/app/jenkins/template_app_jenkins.yaml11
-rw-r--r--templates/app/kafka_jmx/README.md190
-rw-r--r--templates/app/kafka_jmx/template_app_kafka_jmx.yaml11
-rw-r--r--templates/app/memcached/README.md90
-rw-r--r--templates/app/memcached/template_app_memcached.yaml12
-rw-r--r--templates/app/nginx_agent/README.md68
-rw-r--r--templates/app/nginx_agent/template_app_nginx_agent.yaml4
-rw-r--r--templates/app/nginx_http/README.md67
-rw-r--r--templates/app/nginx_http/template_app_nginx_http.yaml4
-rw-r--r--templates/app/nginx_plus_http/README.md154
-rw-r--r--templates/app/nginx_plus_http/template_app_nginx_plus_http.yaml130
-rw-r--r--templates/app/php-fpm_agent/README.md24
-rw-r--r--templates/app/php-fpm_agent/template_app_php-fpm_agent.yaml6
-rw-r--r--templates/app/php-fpm_http/README.md22
-rw-r--r--templates/app/php-fpm_http/template_app_php-fpm_http.yaml6
-rw-r--r--templates/app/rabbitmq_agent/README.md300
-rw-r--r--templates/app/rabbitmq_agent/template_app_rabbitmq_agent.yaml18
-rw-r--r--templates/app/rabbitmq_http/README.md288
-rw-r--r--templates/app/rabbitmq_http/template_app_rabbitmq_http.yaml12
-rw-r--r--templates/app/sharepoint_http/README.md30
-rw-r--r--templates/app/sharepoint_http/template_app_sharepoint_http.yaml23
-rw-r--r--templates/app/squid_snmp/README.md52
-rw-r--r--templates/app/squid_snmp/template_app_squid_snmp.yaml4
-rw-r--r--templates/app/systemd/README.md12
-rw-r--r--templates/app/systemd/template_app_systemd.yaml5
-rw-r--r--templates/app/tomcat_jmx/README.md29
-rw-r--r--templates/app/tomcat_jmx/template_app_tomcat_jmx.yaml32
-rw-r--r--templates/app/travis_http/README.md12
-rw-r--r--templates/app/travis_http/template_app_travis_ci_http.yaml13
-rw-r--r--templates/app/vault_http/README.md174
-rw-r--r--templates/app/vault_http/template_app_vault.yaml6
-rw-r--r--templates/app/wildfly_domain_jmx/README.md20
-rw-r--r--templates/app/wildfly_domain_jmx/template_app_wildfly_domain_jmx.yaml4
-rw-r--r--templates/app/wildfly_server_jmx/README.md100
-rw-r--r--templates/app/wildfly_server_jmx/template_app_wildfly_server_jmx.yaml10
-rw-r--r--templates/app/zabbix_server/README.md197
-rw-r--r--templates/app/zabbix_server/template_app_zabbix_server.yaml236
-rw-r--r--templates/app/zabbix_server_remote/README.md196
-rw-r--r--templates/app/zabbix_server_remote/template_app_remote_zabbix_server.yaml259
-rw-r--r--templates/app/zookeeper_http/README.md148
-rw-r--r--templates/app/zookeeper_http/template_app_zookeeper_http.yaml10
80 files changed, 2589 insertions, 2174 deletions
diff --git a/templates/app/activemq_jmx/README.md b/templates/app/activemq_jmx/README.md
index cd9d5e10e73..aaeb81b1667 100644
--- a/templates/app/activemq_jmx/README.md
+++ b/templates/app/activemq_jmx/README.md
@@ -85,8 +85,8 @@ There are no template links in this template.
|ActiveMQ |Broker {#JMXBROKERNAME}: Storage usage in percents |<p>Percent of store limit used.</p> |JMX |jmx[{#JMXOBJ},StorePercentUsage] |
|ActiveMQ |Broker {#JMXBROKERNAME}: Temp limit |<p>Disk limit, in bytes, used for non-persistent messages and temporary data before producers are blocked.</p> |JMX |jmx[{#JMXOBJ},TempLimit]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|ActiveMQ |Broker {#JMXBROKERNAME}: Temp usage in percents |<p>Percent of temp limit used.</p> |JMX |jmx[{#JMXOBJ},TempPercentUsage] |
-|ActiveMQ |Broker {#JMXBROKERNAME}: Messages enqueue rate |<p>Rate of messages that have been sent to the broker.</p> |JMX |jmx[{#JMXOBJ},TotalEnqueueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|ActiveMQ |Broker {#JMXBROKERNAME}: Messages dequeue rate |<p>Rate of messages that have been delivered by the broker and acknowledged by consumers.</p> |JMX |jmx[{#JMXOBJ},TotalDequeueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|ActiveMQ |Broker {#JMXBROKERNAME}: Messages enqueue rate |<p>Rate of messages that have been sent to the broker.</p> |JMX |jmx[{#JMXOBJ},TotalEnqueueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|ActiveMQ |Broker {#JMXBROKERNAME}: Messages dequeue rate |<p>Rate of messages that have been delivered by the broker and acknowledged by consumers.</p> |JMX |jmx[{#JMXOBJ},TotalDequeueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|ActiveMQ |Broker {#JMXBROKERNAME}: Consumers count total |<p>Number of consumers attached to this broker.</p> |JMX |jmx[{#JMXOBJ},TotalConsumerCount] |
|ActiveMQ |Broker {#JMXBROKERNAME}: Producers count total |<p>Number of producers attached to this broker.</p> |JMX |jmx[{#JMXOBJ},TotalProducerCount] |
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Consumers count |<p>Number of consumers attached to this destination.</p> |JMX |jmx[{#JMXOBJ},ConsumerCount] |
@@ -94,8 +94,8 @@ There are no template links in this template.
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Producers count |<p>Number of producers attached to this destination.</p> |JMX |jmx[{#JMXOBJ},ProducerCount] |
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Producers count total on {#JMXBROKERNAME} |<p>Number of producers attached to the broker of this destination. Used to suppress destination's triggers when the count of producers on the broker is lower than threshold.</p> |JMX |jmx["org.apache.activemq:type=Broker,brokerName={#JMXBROKERNAME}",{$ACTIVEMQ.TOTAL.PRODUCERS.COUNT: "{#JMXDESTINATIONNAME}"}]<p>**Preprocessing**:</p><p>- IN_RANGE: `0 {$ACTIVEMQ.BROKER.PRODUCERS.MIN.HIGH}`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {$ACTIVEMQ.BROKER.PRODUCERS.MIN.HIGH}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Memory usage in percents |<p>The percentage of the memory limit used.</p> |JMX |jmx[{#JMXOBJ},MemoryPercentUsage] |
-|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Messages enqueue rate |<p>Rate of messages that have been sent to the destination.</p> |JMX |jmx[{#JMXOBJ},EnqueueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Messages dequeue rate |<p>Rate of messages that has been acknowledged (and removed) from the destination.</p> |JMX |jmx[{#JMXOBJ},DequeueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Messages enqueue rate |<p>Rate of messages that have been sent to the destination.</p> |JMX |jmx[{#JMXOBJ},EnqueueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Messages dequeue rate |<p>Rate of messages that has been acknowledged (and removed) from the destination.</p> |JMX |jmx[{#JMXOBJ},DequeueCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Queue size |<p>Number of messages on this destination, including any that have been dispatched but not acknowledged.</p> |JMX |jmx[{#JMXOBJ},QueueSize] |
|ActiveMQ |{#JMXBROKERNAME}: {#JMXDESTINATIONTYPE} {#JMXDESTINATIONNAME}: Expired messages count |<p>Number of messages that have been expired.</p> |JMX |jmx[{#JMXOBJ},ExpiredCount]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
diff --git a/templates/app/activemq_jmx/template_app_activemq_jmx.yaml b/templates/app/activemq_jmx/template_app_activemq_jmx.yaml
index 1076a6ab425..01caeebb96e 100644
--- a/templates/app/activemq_jmx/template_app_activemq_jmx.yaml
+++ b/templates/app/activemq_jmx/template_app_activemq_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-12-09T11:58:49Z'
+ date: '2021-12-19T15:19:27Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
diff --git a/templates/app/apache_agent/README.md b/templates/app/apache_agent/README.md
index c0384e79fc9..5225f0fa9cd 100644
--- a/templates/app/apache_agent/README.md
+++ b/templates/app/apache_agent/README.md
@@ -110,8 +110,8 @@ There are no template links in this template.
|Apache |Apache: Service ping |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|Apache |Apache: Service response time |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service.perf[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"] |
|Apache |Apache: Total bytes |<p>Total bytes served</p> |DEPENDENT |apache.bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p> |
-|Apache |Apache: Bytes per second |<p>Calculated as change rate for 'Total bytes' stat.</p><p>BytesPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Apache |Apache: Requests per second |<p>Calculated as change rate for 'Total requests' stat.</p><p>ReqPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p><p>- CHANGE_PER_SECOND |
+|Apache |Apache: Bytes per second |<p>Calculated as change rate for 'Total bytes' stat.</p><p>BytesPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Apache |Apache: Requests per second |<p>Calculated as change rate for 'Total requests' stat.</p><p>ReqPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p><p>- CHANGE_PER_SECOND</p> |
|Apache |Apache: Total requests |<p>A total number of accesses</p> |DEPENDENT |apache.requests<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p> |
|Apache |Apache: Uptime |<p>Service uptime in seconds</p> |DEPENDENT |apache.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.ServerUptimeSeconds`</p> |
|Apache |Apache: Version |<p>Service version</p> |DEPENDENT |apache.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.ServerVersion`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
@@ -138,18 +138,18 @@ There are no template links in this template.
|Apache |Apache: Connections total |<p>Number of total connections</p> |DEPENDENT |apache.connections[total{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ConnsTotal`</p> |
|Apache |Apache: Bytes per request |<p>Average number of client requests per second</p> |DEPENDENT |apache.bytes[per_request{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.BytesPerReq`</p> |
|Apache |Apache: Number of async processes |<p>Number of async processes</p> |DEPENDENT |apache.process[num{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.Processes`</p> |
-|Zabbix_raw_items |Apache: Get status |<p>Getting data from a machine-readable version of the Apache status page.</p><p>https://httpd.apache.org/docs/current/mod/mod_status.html</p> |ZABBIX_PASSIVE |web.page.get["{$APACHE.STATUS.SCHEME}://{$APACHE.STATUS.HOST}:{$APACHE.STATUS.PORT}/{$APACHE.STATUS.PATH}"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Zabbix_raw_items |Apache: Get status |<p>Getting data from a machine-readable version of the Apache status page.</p><p>https://httpd.apache.org/docs/current/mod/mod_status.html</p> |ZABBIX_PASSIVE |web.page.get["{$APACHE.STATUS.SCHEME}://{$APACHE.STATUS.HOST}:{$APACHE.STATUS.PORT}/{$APACHE.STATUS.PATH}"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Apache: Service is down |<p>-</p> |`{TEMPLATE_NAME:net.tcp.service[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"].last()}=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p> |
-|Apache: Service response time is too high (over {$APACHE.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`{TEMPLATE_NAME:net.tcp.service.perf[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"].min(5m)}>{$APACHE.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p><p>- Apache: Service is down</p> |
-|Apache: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:apache.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|Apache: Version has changed (new version: {ITEM.VALUE}) |<p>Apache version has changed. Ack to close.</p> |`{TEMPLATE_NAME:apache.version.diff()}=1 and {TEMPLATE_NAME:apache.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Apache: Process is not running |<p>-</p> |`{TEMPLATE_NAME:proc.num["{$APACHE.PROCESS_NAME}"].last()}=0` |HIGH | |
-|Apache: Failed to fetch status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`{TEMPLATE_NAME:web.page.get["{$APACHE.STATUS.SCHEME}://{$APACHE.STATUS.HOST}:{$APACHE.STATUS.PORT}/{$APACHE.STATUS.PATH}"].nodata(30m)}=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p><p>- Apache: Service is down</p> |
+|Apache: Service is down |<p>-</p> |`last(/Apache by Zabbix agent/net.tcp.service[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p> |
+|Apache: Service response time is too high (over {$APACHE.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/Apache by Zabbix agent/net.tcp.service.perf[http,"{$APACHE.STATUS.HOST}","{$APACHE.STATUS.PORT}"],5m)>{$APACHE.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p><p>- Apache: Service is down</p> |
+|Apache: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Apache by Zabbix agent/apache.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Apache: Version has changed (new version: {ITEM.VALUE}) |<p>Apache version has changed. Ack to close.</p> |`last(/Apache by Zabbix agent/apache.version,#1)<>last(/Apache by Zabbix agent/apache.version,#2) and length(last(/Apache by Zabbix agent/apache.version))>0` |INFO |<p>Manual close: YES</p> |
+|Apache: Process is not running |<p>-</p> |`last(/Apache by Zabbix agent/proc.num["{$APACHE.PROCESS_NAME}"])=0` |HIGH | |
+|Apache: Failed to fetch status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Apache by Zabbix agent/web.page.get["{$APACHE.STATUS.SCHEME}://{$APACHE.STATUS.HOST}:{$APACHE.STATUS.PORT}/{$APACHE.STATUS.PATH}"],30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Process is not running</p><p>- Apache: Service is down</p> |
## Feedback
diff --git a/templates/app/apache_agent/template_app_apache_agent.yaml b/templates/app/apache_agent/template_app_apache_agent.yaml
index 809513581d1..0be251d9761 100644
--- a/templates/app/apache_agent/template_app_apache_agent.yaml
+++ b/templates/app/apache_agent/template_app_apache_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-22T16:33:40Z'
+ date: '2021-12-19T15:19:27Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -16,7 +16,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/384764-discussion-thread-for-official-zabbix-template-apache
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/apache_http/README.md b/templates/app/apache_http/README.md
index 43ba30c4829..d8c0eabcb69 100644
--- a/templates/app/apache_http/README.md
+++ b/templates/app/apache_http/README.md
@@ -105,8 +105,8 @@ There are no template links in this template.
|Apache |Apache: Service ping |<p>-</p> |SIMPLE |net.tcp.service[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|Apache |Apache: Service response time |<p>-</p> |SIMPLE |net.tcp.service.perf[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"] |
|Apache |Apache: Total bytes |<p>Total bytes served</p> |DEPENDENT |apache.bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p> |
-|Apache |Apache: Bytes per second |<p>Calculated as change rate for 'Total bytes' stat.</p><p>BytesPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Apache |Apache: Requests per second |<p>Calculated as change rate for 'Total requests' stat.</p><p>ReqPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p><p>- CHANGE_PER_SECOND |
+|Apache |Apache: Bytes per second |<p>Calculated as change rate for 'Total bytes' stat.</p><p>BytesPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total kBytes"]`</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Apache |Apache: Requests per second |<p>Calculated as change rate for 'Total requests' stat.</p><p>ReqPerSec is not used, as it counts average since last Apache server start.</p> |DEPENDENT |apache.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p><p>- CHANGE_PER_SECOND</p> |
|Apache |Apache: Total requests |<p>A total number of accesses</p> |DEPENDENT |apache.requests<p>**Preprocessing**:</p><p>- JSONPATH: `$["Total Accesses"]`</p> |
|Apache |Apache: Uptime |<p>Service uptime in seconds</p> |DEPENDENT |apache.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.ServerUptimeSeconds`</p> |
|Apache |Apache: Version |<p>Service version</p> |DEPENDENT |apache.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.ServerVersion`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
@@ -129,17 +129,17 @@ There are no template links in this template.
|Apache |Apache: Connections total |<p>Number of total connections</p> |DEPENDENT |apache.connections[total{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ConnsTotal`</p> |
|Apache |Apache: Bytes per request |<p>Average number of client requests per second</p> |DEPENDENT |apache.bytes[per_request{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.BytesPerReq`</p> |
|Apache |Apache: Number of async processes |<p>Number of async processes</p> |DEPENDENT |apache.process[num{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.Processes`</p> |
-|Zabbix_raw_items |Apache: Get status |<p>Getting data from a machine-readable version of the Apache status page.</p><p>https://httpd.apache.org/docs/current/mod/mod_status.html</p> |HTTP_AGENT |apache.get_status<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Zabbix_raw_items |Apache: Get status |<p>Getting data from a machine-readable version of the Apache status page.</p><p>https://httpd.apache.org/docs/current/mod/mod_status.html</p> |HTTP_AGENT |apache.get_status<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Apache: Service is down |<p>-</p> |`{TEMPLATE_NAME:net.tcp.service[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"].last()}=0` |AVERAGE |<p>Manual close: YES</p> |
-|Apache: Service response time is too high (over {$APACHE.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`{TEMPLATE_NAME:net.tcp.service.perf[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"].min(5m)}>{$APACHE.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Service is down</p> |
-|Apache: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:apache.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|Apache: Version has changed (new version: {ITEM.VALUE}) |<p>Apache version has changed. Ack to close.</p> |`{TEMPLATE_NAME:apache.version.diff()}=1 and {TEMPLATE_NAME:apache.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Apache: Failed to fetch status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`{TEMPLATE_NAME:apache.get_status.nodata(30m)}=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Service is down</p> |
+|Apache: Service is down |<p>-</p> |`last(/Apache by HTTP/net.tcp.service[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|Apache: Service response time is too high (over {$APACHE.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/Apache by HTTP/net.tcp.service.perf[http,"{HOST.CONN}","{$APACHE.STATUS.PORT}"],5m)>{$APACHE.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Service is down</p> |
+|Apache: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Apache by HTTP/apache.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Apache: Version has changed (new version: {ITEM.VALUE}) |<p>Apache version has changed. Ack to close.</p> |`last(/Apache by HTTP/apache.version,#1)<>last(/Apache by HTTP/apache.version,#2) and length(last(/Apache by HTTP/apache.version))>0` |INFO |<p>Manual close: YES</p> |
+|Apache: Failed to fetch status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Apache by HTTP/apache.get_status,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Apache: Service is down</p> |
## Feedback
diff --git a/templates/app/apache_http/template_app_apache_http.yaml b/templates/app/apache_http/template_app_apache_http.yaml
index 21d7753af59..91893e8f79f 100644
--- a/templates/app/apache_http/template_app_apache_http.yaml
+++ b/templates/app/apache_http/template_app_apache_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-22T16:33:44Z'
+ date: '2021-12-19T15:19:28Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -16,7 +16,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/384764-discussion-thread-for-official-zabbix-template-apache
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/aranet/README.md b/templates/app/aranet/README.md
index 547840c6033..c9e9f7ebea9 100644
--- a/templates/app/aranet/README.md
+++ b/templates/app/aranet/README.md
@@ -17,20 +17,25 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|-----------------------------------------|-----------------------------------------------|----------------------------|
-| {$ARANET.API.ENDPOINT} | <p>Aranet Cloud API endpoint</p> | `https://aranet.cloud/api` |
-| {$ARANET.API.PASSWORD} | <p>Aranet Cloud password</p> | `<PUT YOUR PASSWORD>` |
-| {$ARANET.API.SPACE_NAME} | <p>Aranet Cloud space name</p> | `<PUT YOUR SPACE NAME>` |
-| {$ARANET.API.USERNAME} | <p>Aranet Cloud username</p> | `<PUT YOUR USERNAME>` |
-| {$ARANET.BATT.VOLTAGE.MIN.CRIT} | <p>Battery voltage critical threshold</p> | `2` |
-| {$ARANET.BATT.VOLTAGE.MIN.WARN} | <p>Battery voltage warning threshold</p> | `1` |
-| {$ARANET.CO2.MAX.CRIT} | <p>CO2 critical threshold</p> | `1000` |
-| {$ARANET.CO2.MAX.WARN} | <p>CO2 warning threshold</p> | `600` |
-| {$ARANET.HUMIDITY.MAX.WARN} | <p>Maximum humidity threshold</p> | `70` |
-| {$ARANET.HUMIDITY.MIN.WARN} | <p>Minimum humidity threshold</p> | `20` |
-| {$ARANET.LLD.FILTER.SENSOR.MATCHES} | <p>Filter of discoverable sensors</p> | `.+` |
-| {$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES} | <p>Filter to exclude discoverable sensors</p> | `CHANGE_IF_NEEDED` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$ARANET.API.ENDPOINT} |<p>Aranet Cloud API endpoint.</p> |`https://aranet.cloud/api` |
+|{$ARANET.API.PASSWORD} |<p>Aranet Cloud password.</p> |`<PUT YOUR PASSWORD>` |
+|{$ARANET.API.SPACE_NAME} |<p>Aranet Cloud organization name.</p> |`<PUT YOUR SPACE NAME>` |
+|{$ARANET.API.USERNAME} |<p>Aranet Cloud username.</p> |`<PUT YOUR USERNAME>` |
+|{$ARANET.BATT.VOLTAGE.MIN.CRIT} |<p>Battery voltage critical threshold.</p> |`2` |
+|{$ARANET.BATT.VOLTAGE.MIN.WARN} |<p>Battery voltage warning threshold.</p> |`1` |
+|{$ARANET.CO2.MAX.CRIT} |<p>CO2 critical threshold.</p> |`1000` |
+|{$ARANET.CO2.MAX.WARN} |<p>CO2 warning threshold.</p> |`600` |
+|{$ARANET.HUMIDITY.MAX.WARN} |<p>Maximum humidity threshold.</p> |`70` |
+|{$ARANET.HUMIDITY.MIN.WARN} |<p>Minimum humidity threshold.</p> |`20` |
+|{$ARANET.LAST_UPDATE.MAX.WARN} |<p>Data update delay threshold.</p> |`1h` |
+|{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES} |<p>Filter of discoverable sensors by gateway id.</p> |`.+` |
+|{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES} |<p>Filter of discoverable sensors by gateway name.</p> |`.+` |
+|{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES} |<p>Filter to exclude discoverable sensors by gateway name.</p> |`CHANGE_IF_NEEDED` |
+|{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES} |<p>Filter of discoverable sensors by id.</p> |`.+` |
+|{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES} |<p>Filter of discoverable sensors by name.</p> |`.+` |
+|{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES} |<p>Filter to exclude discoverable sensors by name.</p> |`CHANGE_IF_NEEDED` |
## Template links
@@ -38,40 +43,69 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|----------------------------------------|--------------------------------------------------------------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Temperature sensors discovery | <p>Discovery for Aranet Cloud temperature sensors</p> | DEPENDENT | aranet.temp.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Temperature`</p> |
-| Humidity sensors discovery | <p>Discovery for Aranet Cloud humidity sensors</p> | DEPENDENT | aranet.humidity.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Humidity`</p> |
-| RSSI sensors discovery | <p>Discovery for Aranet Cloud RSSI sensors</p> | DEPENDENT | aranet.rssi.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `RSSI`</p> |
-| Battery voltage sensors discovery | <p>Discovery for Aranet Cloud battery voltage sensors</p> | DEPENDENT | aranet.battery.voltage.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Battery voltage`</p> |
-| CO2 sensors discovery | <p>Discovery for Aranet Cloud CO2 sensors</p> | DEPENDENT | aranet.co2.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `CO₂`</p> |
-| Atmospheric pressure sensors discovery | <p>Discovery for Aranet Cloud atmospheric pressure sensors</p> | DEPENDENT | aranet.pressure.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Atmospheric Pressure`</p> |
-| Voltage sensors discovery | <p>Discovery for Aranet Cloud voltage sensors</p> | DEPENDENT | aranet.voltage.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Voltage`</p> |
-| Weight sensors discovery | <p>Discovery for Aranet Cloud weight sensors</p> | DEPENDENT | aranet.weight.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Weight`</p> |
-| Volumetric Water Content discovery | <p>Discovery for Aranet Cloud volumetric Water Content sensors</p> | DEPENDENT | aranet.olumetric.water.content.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Volumetric Water Content`</p> |
-| PPFD sensors discovery | <p>Discovery for Aranet Cloud PPFD sensors</p> | DEPENDENT | aranet.ppfd.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `PPFD`</p> |
-| Distance sensors discovery | <p>Discovery for Aranet Cloud distance sensors</p> | DEPENDENT | aranet.distance.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Distance`</p> |
-| Illuminance sensors discovery | <p>Discovery for Aranet Cloud illuminance sensors</p> | DEPENDENT | aranet.illuminance.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Illuminance`</p> |
-| pH sensors discovery | <p>Discovery for Aranet Cloud pH sensors</p> | DEPENDENT | aranet.ph.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `pH`</p> |
-| Current sensors discovery | <p>Discovery for Aranet Cloud current sensors</p> | DEPENDENT | aranet.current.discovery<p>**Filter**:</p>AND <p>- A: {#SENSOR} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.MATCHES}`</p><p>- B: {#SENSOR} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR.NOT_MATCHES}`</p><p>- C: {#METRIC} MATCHES_REGEX `Current`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Temperature discovery |<p>Discovery for Aranet Cloud temperature sensors</p> |DEPENDENT |aranet.temp.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Temperature`</p> |
+|Humidity discovery |<p>Discovery for Aranet Cloud humidity sensors</p> |DEPENDENT |aranet.humidity.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Humidity`</p> |
+|RSSI discovery |<p>Discovery for Aranet Cloud RSSI sensors</p> |DEPENDENT |aranet.rssi.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `RSSI`</p> |
+|Battery voltage discovery |<p>Discovery for Aranet Cloud Battery voltage sensors</p> |DEPENDENT |aranet.battery.voltage.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Battery voltage`</p> |
+|CO2 discovery |<p>Discovery for Aranet Cloud CO2 sensors</p> |DEPENDENT |aranet.co2.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `CO₂`</p> |
+|Atmospheric pressure discovery |<p>Discovery for Aranet Cloud atmospheric pressure sensors</p> |DEPENDENT |aranet.pressure.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Atmospheric Pressure`</p> |
+|Voltage discovery |<p>Discovery for Aranet Cloud Voltage sensors</p> |DEPENDENT |aranet.voltage.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Voltage`</p> |
+|Weight discovery |<p>Discovery for Aranet Cloud Weight sensors</p> |DEPENDENT |aranet.weight.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Weight`</p> |
+|Volumetric Water Content discovery |<p>Discovery for Aranet Cloud Volumetric Water Content sensors</p> |DEPENDENT |aranet.volum_water_content.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Volumetric Water Content`</p> |
+|PPFD discovery |<p>Discovery for Aranet Cloud PPFD sensors</p> |DEPENDENT |aranet.ppfd.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `PPFD`</p> |
+|Distance discovery |<p>Discovery for Aranet Cloud Distance sensors</p> |DEPENDENT |aranet.distance.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Distance`</p> |
+|Illuminance discovery |<p>Discovery for Aranet Cloud Illuminance sensors</p> |DEPENDENT |aranet.illuminance.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Illuminance`</p> |
+|pH discovery |<p>Discovery for Aranet Cloud pH sensors</p> |DEPENDENT |aranet.ph.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `pH`</p> |
+|Current discovery |<p>Discovery for Aranet Cloud Current sensors</p> |DEPENDENT |aranet.current.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Current`</p> |
+|Soil Dielectric Permittivity discovery |<p>Discovery for Aranet Cloud Soil Dielectric Permittivity sensors</p> |DEPENDENT |aranet.soil_dielectric_perm.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Soil Dielectric Permittivity`</p> |
+|Soil Electrical Conductivity discovery |<p>Discovery for Aranet Cloud Soil Electrical Conductivity sensors</p> |DEPENDENT |aranet.soil_electric_cond.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Soil Electrical Conductivity`</p> |
+|Pore Electrical Conductivity discovery |<p>Discovery for Aranet Cloud Pore Electrical Conductivity sensors</p> |DEPENDENT |aranet.pore_electric_cond.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Pore Electrical Conductivity`</p> |
+|Pulses discovery |<p>Discovery for Aranet Cloud Pulses sensors</p> |DEPENDENT |aranet.pulses.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Pulses`</p> |
+|Pulses Cumulative discovery |<p>Discovery for Aranet Cloud Pulses Cumulative sensors</p> |DEPENDENT |aranet.pulses_cumulative.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Pulses Cumulative`</p> |
+|Differential Pressure discovery |<p>Discovery for Aranet Cloud Differential Pressure sensors</p> |DEPENDENT |aranet.diff_pressure.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Differential Pressure`</p> |
+|Last update discovery |<p>Discovery for Aranet Cloud Last update metric</p> |DEPENDENT |aranet.last_update.discovery<p>**Filter**:</p>AND <p>- {#SENSOR_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}`</p><p>- {#SENSOR_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}`</p><p>- {#SENSOR_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}`</p><p>- {#GATEWAY_NAME} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}`</p><p>- {#GATEWAY_NAME} NOT_MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}`</p><p>- {#GATEWAY_ID} MATCHES_REGEX `{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}`</p><p>- {#METRIC} MATCHES_REGEX `Last update`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|---------------------------|-------------------------------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Zabbix_raw_items | Aranet: Sensors discovery | <p>Discovery for Aranet Cloud sensors</p> | DEPENDENT | aranet.sensor.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `15m`</p> |
-| Zabbix_raw_items | Aranet: Get data | | SCRIPT | aranet.get_data |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.temp["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.humidity["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.rssi["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.battery.voltage["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.co2["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.pressure["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.voltage["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.weight["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.volumetric.water.content["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.ppfd["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.distance["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.illuminance["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.ph["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.current["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.soil_dielectric_perm["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.soil_electric_cond["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.pore_electric_cond["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.pulses["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.pulses_cumulative["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.diff_pressure["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p> |
+|Aranet |{#METRIC}: [{#GATEWAY_NAME}] {#SENSOR_NAME} |<p>-</p> |DEPENDENT |aranet.last_update["{#GATEWAY_ID}", "{#SENSOR_ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.id == "{#SENSOR_ID}" && @.name == "{#SENSOR_NAME}")].metrics[?(@.name == "{#METRIC}")].value.first()`</p><p>- JAVASCRIPT: `return Math.floor(Date.now()/1000 - Number(value));`</p> |
+|Zabbix_raw_items |Aranet: Sensors discovery |<p>Discovery for Aranet Cloud sensors</p> |DEPENDENT |aranet.sensor.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `15m`</p> |
+|Zabbix_raw_items |Aranet: Get data |<p>-</p> |SCRIPT |aranet.get_data<p>**Expression**:</p>`The text is too long. Please see the template.` |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|----------------------------------------------------------------------------------------------------------------------------|-------------|---------------------------------------------------------------------------------------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
-| {#METRIC}: Low humidity on "{#SENSOR}" (below {$ARANET.HUMIDITY.MIN.WARN:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.humidity["{#ID}"].max(5m)} < {$ARANET.HUMIDITY.MIN.WARN:"{#SENSOR}"}` | WARNING | <p>**Depends on**:</p><p>- {#METRIC}: High humidity on "{#SENSOR}" (over {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR}"}{#UNIT} for 5m)</p> |
-| {#METRIC}: High humidity on "{#SENSOR}" (over {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.humidity["{#ID}"].min(5m)} > {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR}"}` | HIGH | |
-| {#METRIC}: Low battery voltage on "{#SENSOR}" (below {$ARANET.BATT.VOLTAGE.MIN.WARN:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.battery.voltage["{#ID}"].max(5m)} < {$ARANET.BATT.VOLTAGE.MIN.WARN:"{#SENSOR}"}` | WARNING | <p>**Depends on**:</p><p>- {#METRIC}: Critically low battery voltage on "{#SENSOR}" (below {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR}"}{#UNIT} for 5m)</p> |
-| {#METRIC}: Critically low battery voltage on "{#SENSOR}" (below {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.battery.voltage["{#ID}"].max(5m)} < {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR}"}` | HIGH | |
-| {#METRIC}: High CO2 level on "{#SENSOR}" (over {$ARANET.CO2.MAX.WARN:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.co2["{#ID}"].min(5m)} > {$ARANET.CO2.MAX.WARN:"{#SENSOR}"}` | WARNING | <p>**Depends on**:</p><p>- {#METRIC}: Critically high CO2 level on "{#SENSOR}" (over {$ARANET.CO2.MAX.CRIT:"{#SENSOR}"}{#UNIT} for 5m)</p> |
-| {#METRIC}: Critically high CO2 level on "{#SENSOR}" (over {$ARANET.CO2.MAX.CRIT:"{#SENSOR}"}{#UNIT} for 5m) | <p>-</p> | `{TEMPLATE_NAME:aranet.co2["{#ID}"].min(5m)} > {$ARANET.CO2.MAX.CRIT:"{#SENSOR}"}` | HIGH | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|{#METRIC}: Low humidity on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (below {$ARANET.HUMIDITY.MIN.WARN:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`max(/Aranet Cloud/aranet.humidity["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) < {$ARANET.HUMIDITY.MIN.WARN:"{#SENSOR_NAME}"}` |WARNING |<p>**Depends on**:</p><p>- {#METRIC}: High humidity on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (over {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR_NAME}"}{#UNIT} for 5m)</p> |
+|{#METRIC}: High humidity on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (over {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`min(/Aranet Cloud/aranet.humidity["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) > {$ARANET.HUMIDITY.MAX.WARN:"{#SENSOR_NAME}"}` |HIGH | |
+|{#METRIC}: Low battery voltage on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (below {$ARANET.BATT.VOLTAGE.MIN.WARN:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`max(/Aranet Cloud/aranet.battery.voltage["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) < {$ARANET.BATT.VOLTAGE.MIN.WARN:"{#SENSOR_NAME}"}` |WARNING |<p>**Depends on**:</p><p>- {#METRIC}: Critically low battery voltage on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (below {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR_NAME}"}{#UNIT} for 5m)</p> |
+|{#METRIC}: Critically low battery voltage on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (below {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`max(/Aranet Cloud/aranet.battery.voltage["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) < {$ARANET.BATT.VOLTAGE.MIN.CRIT:"{#SENSOR_NAME}"}` |HIGH | |
+|{#METRIC}: High CO2 level on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (over {$ARANET.CO2.MAX.WARN:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`min(/Aranet Cloud/aranet.co2["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) > {$ARANET.CO2.MAX.WARN:"{#SENSOR_NAME}"}` |WARNING |<p>**Depends on**:</p><p>- {#METRIC}: Critically high CO2 level on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (over {$ARANET.CO2.MAX.CRIT:"{#SENSOR_NAME}"}{#UNIT} for 5m)</p> |
+|{#METRIC}: Critically high CO2 level on "[{#GATEWAY_NAME}] {#SENSOR_NAME}" (over {$ARANET.CO2.MAX.CRIT:"{#SENSOR_NAME}"}{#UNIT} for 5m) |<p>-</p> |`min(/Aranet Cloud/aranet.co2["{#GATEWAY_ID}", "{#SENSOR_ID}"],5m) > {$ARANET.CO2.MAX.CRIT:"{#SENSOR_NAME}"}` |HIGH | |
+|{#METRIC}: Sensor data "[{#GATEWAY_NAME}] {#SENSOR_NAME}" is not updated (more than {$ARANET.LAST_UPDATE.MAX.WARN:"{#SENSOR_NAME}"}) |<p>-</p> |`last(/Aranet Cloud/aranet.last_update["{#GATEWAY_ID}", "{#SENSOR_ID}"]) > {$ARANET.LAST_UPDATE.MAX.WARN:"{#SENSOR_NAME}"}` |WARNING | |
## Feedback
diff --git a/templates/app/aranet/aranet_cloud.yaml b/templates/app/aranet/aranet_cloud.yaml
index 8dd99489fe7..cf185fe773b 100644
--- a/templates/app/aranet/aranet_cloud.yaml
+++ b/templates/app/aranet/aranet_cloud.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:19Z'
+ date: '2021-12-19T15:19:29Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -10,7 +10,7 @@ zabbix_export:
uuid: af21edc47557400583e537904ea632aa
template: 'Aranet Cloud'
name: 'Aranet Cloud'
- description: 'Template tooling version used: 0.38'
+ description: 'Template tooling version used: 0.40'
groups:
-
name: Templates/Applications
@@ -1729,57 +1729,68 @@ zabbix_export:
-
macro: '{$ARANET.API.ENDPOINT}'
value: 'https://aranet.cloud/api'
+ description: 'Aranet Cloud API endpoint.'
-
macro: '{$ARANET.API.PASSWORD}'
value: '<PUT YOUR PASSWORD>'
+ description: 'Aranet Cloud password.'
-
macro: '{$ARANET.API.SPACE_NAME}'
value: '<PUT YOUR SPACE NAME>'
+ description: 'Aranet Cloud organization name.'
-
macro: '{$ARANET.API.USERNAME}'
value: '<PUT YOUR USERNAME>'
+ description: 'Aranet Cloud username.'
-
macro: '{$ARANET.BATT.VOLTAGE.MIN.CRIT}'
value: '2'
+ description: 'Battery voltage critical threshold.'
-
macro: '{$ARANET.BATT.VOLTAGE.MIN.WARN}'
value: '1'
+ description: 'Battery voltage warning threshold.'
-
macro: '{$ARANET.CO2.MAX.CRIT}'
value: '1000'
+ description: 'CO2 critical threshold.'
-
macro: '{$ARANET.CO2.MAX.WARN}'
value: '600'
+ description: 'CO2 warning threshold.'
-
macro: '{$ARANET.HUMIDITY.MAX.WARN}'
value: '70'
+ description: 'Maximum humidity threshold.'
-
macro: '{$ARANET.HUMIDITY.MIN.WARN}'
value: '20'
+ description: 'Minimum humidity threshold.'
-
macro: '{$ARANET.LAST_UPDATE.MAX.WARN}'
value: 1h
+ description: 'Data update delay threshold.'
-
macro: '{$ARANET.LLD.FILTER.GATEWAY_ID.MATCHES}'
value: .+
- description: 'Filter of discoverable sensors by gateway id'
+ description: 'Filter of discoverable sensors by gateway id.'
-
macro: '{$ARANET.LLD.FILTER.GATEWAY_NAME.MATCHES}'
value: .+
- description: 'Filter of discoverable sensors by gateway name'
+ description: 'Filter of discoverable sensors by gateway name.'
-
macro: '{$ARANET.LLD.FILTER.GATEWAY_NAME.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discoverable sensors by gateway name'
+ description: 'Filter to exclude discoverable sensors by gateway name.'
-
macro: '{$ARANET.LLD.FILTER.SENSOR_ID.MATCHES}'
value: .+
- description: 'Filter of discoverable sensors by id'
+ description: 'Filter of discoverable sensors by id.'
-
macro: '{$ARANET.LLD.FILTER.SENSOR_NAME.MATCHES}'
value: .+
- description: 'Filter of discoverable sensors by name'
+ description: 'Filter of discoverable sensors by name.'
-
macro: '{$ARANET.LLD.FILTER.SENSOR_NAME.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discoverable sensors by name'
+ description: 'Filter to exclude discoverable sensors by name.'
diff --git a/templates/app/ceph_agent2/README.md b/templates/app/ceph_agent2/README.md
index 1f2e7b6df37..d82bb7cb762 100644
--- a/templates/app/ceph_agent2/README.md
+++ b/templates/app/ceph_agent2/README.md
@@ -57,10 +57,10 @@ There are no template links in this template.
|Ceph |Ceph: Number of Monitors |<p>Number of Monitors configured in Ceph cluster</p> |DEPENDENT |ceph.num_mon<p>**Preprocessing**:</p><p>- JSONPATH: `$.num_mon`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Ceph |Ceph: Overall cluster status |<p>Overall Ceph cluster status, eg 0 - HEALTH_OK, 1 - HEALTH_WARN or 2 - HEALTH_ERR</p> |DEPENDENT |ceph.overall_status<p>**Preprocessing**:</p><p>- JSONPATH: `$.overall_status`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|Ceph |Ceph: Minimum Mon release version |<p>min_mon_release_name</p> |DEPENDENT |ceph.min_mon_release_name<p>**Preprocessing**:</p><p>- JSONPATH: `$.min_mon_release_name`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Ceph |Ceph: Ceph Read bandwidth |<p>Global read Bytes per second</p> |DEPENDENT |ceph.rd_bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.rd_bytes`</p><p>- CHANGE_PER_SECOND |
-|Ceph |Ceph: Ceph Write bandwidth |<p>Global write Bytes per second</p> |DEPENDENT |ceph.wr_bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.wr_bytes`</p><p>- CHANGE_PER_SECOND |
+|Ceph |Ceph: Ceph Read bandwidth |<p>Global read Bytes per second</p> |DEPENDENT |ceph.rd_bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.rd_bytes`</p><p>- CHANGE_PER_SECOND</p> |
+|Ceph |Ceph: Ceph Write bandwidth |<p>Global write Bytes per second</p> |DEPENDENT |ceph.wr_bytes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.wr_bytes`</p><p>- CHANGE_PER_SECOND</p> |
|Ceph |Ceph: Ceph Read operations per sec |<p>Global read operations per second</p> |DEPENDENT |ceph.rd_ops.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.rd_ops`</p> |
-|Ceph |Ceph: Ceph Write operations per sec |<p>Global write operations per second</p> |DEPENDENT |ceph.wr_ops.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.wr_ops`</p><p>- CHANGE_PER_SECOND |
+|Ceph |Ceph: Ceph Write operations per sec |<p>Global write operations per second</p> |DEPENDENT |ceph.wr_ops.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.wr_ops`</p><p>- CHANGE_PER_SECOND</p> |
|Ceph |Ceph: Total bytes available |<p>Total bytes available in Ceph cluster</p> |DEPENDENT |ceph.total_avail_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_avail_bytes`</p> |
|Ceph |Ceph: Total bytes |<p>Total (RAW) capacity of Ceph cluster in bytes</p> |DEPENDENT |ceph.total_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_bytes`</p> |
|Ceph |Ceph: Total bytes used |<p>Total bytes used in Ceph cluster</p> |DEPENDENT |ceph.total_used_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_used_bytes`</p> |
@@ -111,10 +111,10 @@ There are no template links in this template.
|Ceph |Ceph: [{#POOLNAME}] Pool RAW Used |<p>Bytes used in pool including copies made.</p> |DEPENDENT |ceph.pool["{#POOLNAME}",stored_raw]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].stored_raw`</p> |
|Ceph |Ceph: [{#POOLNAME}] Pool Percent Used |<p>Percentage of storage used per pool</p> |DEPENDENT |ceph.pool["{#POOLNAME}",percent_used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].percent_used`</p> |
|Ceph |Ceph: [{#POOLNAME}] Pool objects |<p>Number of objects in the pool.</p> |DEPENDENT |ceph.pool["{#POOLNAME}",objects]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].objects`</p> |
-|Ceph |Ceph: [{#POOLNAME}] Pool Read bandwidth |<p>Per-pool read Bytes/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",rd_bytes.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].rd_bytes`</p><p>- CHANGE_PER_SECOND |
-|Ceph |Ceph: [{#POOLNAME}] Pool Write bandwidth |<p>Per-pool write Bytes/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",wr_bytes.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].wr_bytes`</p><p>- CHANGE_PER_SECOND |
-|Ceph |Ceph: [{#POOLNAME}] Pool Read operations |<p>Per-pool read operations/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",rd_ops.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].rd_ops`</p><p>- CHANGE_PER_SECOND |
-|Ceph |Ceph: [{#POOLNAME}] Pool Write operations |<p>Per-pool write operations/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",wr_ops.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].wr_ops`</p><p>- CHANGE_PER_SECOND |
+|Ceph |Ceph: [{#POOLNAME}] Pool Read bandwidth |<p>Per-pool read Bytes/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",rd_bytes.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].rd_bytes`</p><p>- CHANGE_PER_SECOND</p> |
+|Ceph |Ceph: [{#POOLNAME}] Pool Write bandwidth |<p>Per-pool write Bytes/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",wr_bytes.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].wr_bytes`</p><p>- CHANGE_PER_SECOND</p> |
+|Ceph |Ceph: [{#POOLNAME}] Pool Read operations |<p>Per-pool read operations/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",rd_ops.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].rd_ops`</p><p>- CHANGE_PER_SECOND</p> |
+|Ceph |Ceph: [{#POOLNAME}] Pool Write operations |<p>Per-pool write operations/second</p> |DEPENDENT |ceph.pool["{#POOLNAME}",wr_ops.rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pools["{#POOLNAME}"].wr_ops`</p><p>- CHANGE_PER_SECOND</p> |
|Zabbix_raw_items |Ceph: Get overall cluster status | |ZABBIX_PASSIVE |ceph.status["{$CEPH.CONNSTRING}","{$CEPH.USER}","{$CEPH.API.KEY}"] |
|Zabbix_raw_items |Ceph: Get OSD stats | |ZABBIX_PASSIVE |ceph.osd.stats["{$CEPH.CONNSTRING}","{$CEPH.USER}","{$CEPH.API.KEY}"] |
|Zabbix_raw_items |Ceph: Get OSD dump | |ZABBIX_PASSIVE |ceph.osd.dump["{$CEPH.CONNSTRING}","{$CEPH.USER}","{$CEPH.API.KEY}"] |
@@ -124,13 +124,13 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Ceph: Can not connect to cluster |<p>Connection to Ceph RESTful module is broken (if there is any error presented including AUTH and configuration issues).</p> |`{TEMPLATE_NAME:ceph.ping["{$CEPH.CONNSTRING}","{$CEPH.USER}","{$CEPH.API.KEY}"].last()}=0` |AVERAGE | |
-|Ceph: Cluster in ERROR state |<p>-</p> |`{TEMPLATE_NAME:ceph.overall_status.last()}=2` |AVERAGE |<p>Manual close: YES</p> |
-|Ceph: Cluster in WARNING state |<p>-</p> |`{TEMPLATE_NAME:ceph.overall_status.last()}=1`<p>Recovery expression:</p>`{TEMPLATE_NAME:ceph.overall_status.last()}=0` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Ceph: Cluster in ERROR state</p> |
-|Ceph: Minimum monitor release version has changed (new version: {ITEM.VALUE}) |<p>Ceph version has changed. Ack to close.</p> |`{TEMPLATE_NAME:ceph.min_mon_release_name.diff()}=1 and {TEMPLATE_NAME:ceph.min_mon_release_name.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Ceph: OSD osd.{#OSDNAME} is down |<p>OSD osd.{#OSDNAME} is marked "down" in the osdmap.</p><p>The OSD daemon may have been stopped, or peer OSDs may be unable to reach the OSD over the network.</p> |`{TEMPLATE_NAME:ceph.osd[{#OSDNAME},up].last()} = 0` |AVERAGE | |
-|Ceph: OSD osd.{#OSDNAME} is full |<p>-</p> |`{TEMPLATE_NAME:ceph.osd[{#OSDNAME},fill].min(15m)} > {Ceph by Zabbix Agent2:ceph.osd_full_ratio.last()}*100` |AVERAGE | |
-|Ceph: Ceph OSD osd.{#OSDNAME} is near full |<p>-</p> |`{TEMPLATE_NAME:ceph.osd[{#OSDNAME},fill].min(15m)} > {Ceph by Zabbix Agent2:ceph.osd_nearfull_ratio.last()}*100` |WARNING |<p>**Depends on**:</p><p>- Ceph: OSD osd.{#OSDNAME} is full</p> |
+|Ceph: Can not connect to cluster |<p>Connection to Ceph RESTful module is broken (if there is any error presented including AUTH and configuration issues).</p> |`last(/Ceph by Zabbix agent 2/ceph.ping["{$CEPH.CONNSTRING}","{$CEPH.USER}","{$CEPH.API.KEY}"])=0` |AVERAGE | |
+|Ceph: Cluster in ERROR state |<p>-</p> |`last(/Ceph by Zabbix agent 2/ceph.overall_status)=2` |AVERAGE |<p>Manual close: YES</p> |
+|Ceph: Cluster in WARNING state |<p>-</p> |`last(/Ceph by Zabbix agent 2/ceph.overall_status)=1`<p>Recovery expression:</p>`last(/Ceph by Zabbix agent 2/ceph.overall_status)=0` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Ceph: Cluster in ERROR state</p> |
+|Ceph: Minimum monitor release version has changed (new version: {ITEM.VALUE}) |<p>Ceph version has changed. Ack to close.</p> |`last(/Ceph by Zabbix agent 2/ceph.min_mon_release_name,#1)<>last(/Ceph by Zabbix agent 2/ceph.min_mon_release_name,#2) and length(last(/Ceph by Zabbix agent 2/ceph.min_mon_release_name))>0` |INFO |<p>Manual close: YES</p> |
+|Ceph: OSD osd.{#OSDNAME} is down |<p>OSD osd.{#OSDNAME} is marked "down" in the osdmap.</p><p>The OSD daemon may have been stopped, or peer OSDs may be unable to reach the OSD over the network.</p> |`last(/Ceph by Zabbix agent 2/ceph.osd[{#OSDNAME},up]) = 0` |AVERAGE | |
+|Ceph: OSD osd.{#OSDNAME} is full |<p>-</p> |`min(/Ceph by Zabbix agent 2/ceph.osd[{#OSDNAME},fill],15m) > last(/Ceph by Zabbix agent 2/ceph.osd_full_ratio)*100` |AVERAGE | |
+|Ceph: Ceph OSD osd.{#OSDNAME} is near full |<p>-</p> |`min(/Ceph by Zabbix agent 2/ceph.osd[{#OSDNAME},fill],15m) > last(/Ceph by Zabbix agent 2/ceph.osd_nearfull_ratio)*100` |WARNING |<p>**Depends on**:</p><p>- Ceph: OSD osd.{#OSDNAME} is full</p> |
## Feedback
diff --git a/templates/app/ceph_agent2/template_app_ceph_agent2.yaml b/templates/app/ceph_agent2/template_app_ceph_agent2.yaml
index 9cc6394fb92..7d5df48d1cc 100644
--- a/templates/app/ceph_agent2/template_app_ceph_agent2.yaml
+++ b/templates/app/ceph_agent2/template_app_ceph_agent2.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:15Z'
+ date: '2021-12-19T15:19:30Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,7 +13,7 @@ zabbix_export:
description: |
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/410059-discussion-thread-for-official-zabbix-template-ceph
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/certificate_agent2/README.md b/templates/app/certificate_agent2/README.md
index 6148a0ce5b8..f97a9fe0845 100644
--- a/templates/app/certificate_agent2/README.md
+++ b/templates/app/certificate_agent2/README.md
@@ -64,9 +64,9 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Cert: SSL certificate is invalid |<p>SSL certificate has expired or it is issued for another domain.</p> |`find(/TEMPLATE_NAME/cert.validation,,"like","invalid")=1` |HIGH | |
-|Cert: SSL certificate expires soon (less than {$CERT.EXPIRY.WARN} days) |<p>The SSL certificate should be updated or it will become untrusted.</p> |`(last(/TEMPLATE_NAME/cert.not_after) - now()) / 86400 < {$CERT.EXPIRY.WARN}` |WARNING |<p>**Depends on**:</p><p>- Cert: SSL certificate is invalid</p> |
-|Cert: Fingerprint has changed (new version: {ITEM.VALUE}) |<p>The SSL certificate fingerprint has changed. If you did not update the certificate, it may mean your certificate has been hacked. Ack to close.</p><p>There could be multiple valid certificates on some installations. In this case, the trigger will have a false positive. You can ignore it or disable the trigger.</p> |`last(/TEMPLATE_NAME/cert.sha1_fingerprint) <> last(/TEMPLATE_NAME/cert.sha1_fingerprint,#2)` |INFO |<p>Manual close: YES</p> |
+|Cert: SSL certificate is invalid |<p>SSL certificate has expired or it is issued for another domain.</p> |`find(/Website certificate by Zabbix agent 2/cert.validation,,"like","invalid")=1` |HIGH | |
+|Cert: SSL certificate expires soon (less than {$CERT.EXPIRY.WARN} days) |<p>The SSL certificate should be updated or it will become untrusted.</p> |`(last(/Website certificate by Zabbix agent 2/cert.not_after) - now()) / 86400 < {$CERT.EXPIRY.WARN}` |WARNING |<p>**Depends on**:</p><p>- Cert: SSL certificate is invalid</p> |
+|Cert: Fingerprint has changed (new version: {ITEM.VALUE}) |<p>The SSL certificate fingerprint has changed. If you did not update the certificate, it may mean your certificate has been hacked. Ack to close.</p><p>There could be multiple valid certificates on some installations. In this case, the trigger will have a false positive. You can ignore it or disable the trigger.</p> |`last(/Website certificate by Zabbix agent 2/cert.sha1_fingerprint) <> last(/Website certificate by Zabbix agent 2/cert.sha1_fingerprint,#2)` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/certificate_agent2/template_app_certificate_agent2.yaml b/templates/app/certificate_agent2/template_app_certificate_agent2.yaml
index 9ee2a54a4f9..621c857afd0 100644
--- a/templates/app/certificate_agent2/template_app_certificate_agent2.yaml
+++ b/templates/app/certificate_agent2/template_app_certificate_agent2.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-08-09T16:55:44Z'
+ date: '2021-12-19T15:19:31Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -16,7 +16,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/428309-discussion-thread-for-official-zabbix-template-tls-ssl-certificates-monitoring
- Template tooling version used: 0.39
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/cloudflare_http/README.md b/templates/app/cloudflare_http/README.md
index 9cd6d448b8c..166564d887c 100644
--- a/templates/app/cloudflare_http/README.md
+++ b/templates/app/cloudflare_http/README.md
@@ -76,8 +76,8 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Cloudflare: Cached bandwidth is too low (less than {$CLOUDFLARE.CACHED_BANDWIDTH.MIN.WARN}% for 3 last measurements) | |`max(/TEMPLATE_NAME/cloudflare.bandwidth.cache_hit_ratio,#3) < {$CLOUDFLARE.CACHED_BANDWIDTH.MIN.WARN}` |WARNING | |
-|Cloudflare: Ratio of non-2xx responses is too high (more than {$CLOUDFLARE.ERRORS.MAX.WARN}% for 3 last measurements) |<p>A large number of errors can indicate a malfunction of the site.</p> |`min(/TEMPLATE_NAME/cloudflare.requests.others_ratio,#3) > {$CLOUDFLARE.ERRORS.MAX.WARN}` |AVERAGE | |
+|Cloudflare: Cached bandwidth is too low (less than {$CLOUDFLARE.CACHED_BANDWIDTH.MIN.WARN}% for 3 last measurements) | |`max(/Cloudflare by HTTP/cloudflare.bandwidth.cache_hit_ratio,#3) < {$CLOUDFLARE.CACHED_BANDWIDTH.MIN.WARN}` |WARNING | |
+|Cloudflare: Ratio of non-2xx responses is too high (more than {$CLOUDFLARE.ERRORS.MAX.WARN}% for 3 last measurements) |<p>A large number of errors can indicate a malfunction of the site.</p> |`min(/Cloudflare by HTTP/cloudflare.requests.others_ratio,#3) > {$CLOUDFLARE.ERRORS.MAX.WARN}` |AVERAGE | |
## Feedback
diff --git a/templates/app/cloudflare_http/template_app_cloudflare_http.yaml b/templates/app/cloudflare_http/template_app_cloudflare_http.yaml
index 73afead30a8..d475bb48588 100644
--- a/templates/app/cloudflare_http/template_app_cloudflare_http.yaml
+++ b/templates/app/cloudflare_http/template_app_cloudflare_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-07-27T10:10:26Z'
+ date: '2021-12-19T15:19:31Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,7 +13,7 @@ zabbix_export:
description: |
The template to monitor Cloudflare to see your web traffic and DNS metrics. It works without any external scripts and uses Script item.
- Template tooling version used: 0.39
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -972,14 +972,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -996,14 +988,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -1020,14 +1004,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -1045,14 +1021,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -1069,14 +1037,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -1094,14 +1054,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
@@ -1118,14 +1070,6 @@ zabbix_export:
name: source_type
value: '0'
-
- type: INTEGER
- name: columns
- value: '1'
- -
- type: INTEGER
- name: rows
- value: '1'
- -
type: GRAPH
name: graphid
value:
diff --git a/templates/app/docker/README.md b/templates/app/docker/README.md
index 7ac91139094..74ac972ec22 100644
--- a/templates/app/docker/README.md
+++ b/templates/app/docker/README.md
@@ -7,7 +7,7 @@ For Zabbix version: 5.4 and higher
The template to monitor Docker engine by Zabbix that work without any external scripts.
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-Template `Docker by Zabbix agent 2` — collects metrics by polling zabbix-agent2.
+Template `Docker by Zabbix agent 2` — collects metrics by polling zabbix-agent2.
@@ -45,8 +45,8 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Images discovery |<p>Discovery for images metrics</p> |ZABBIX_PASSIVE |docker.images.discovery<p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$DOCKER.LLD.FILTER.IMAGE.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$DOCKER.LLD.FILTER.IMAGE.NOT_MATCHES}`</p> |
-|Containers discovery |<p>Discovery for containers metrics</p><p>Parameter:</p><p>true - Returns all containers</p><p>false - Returns only running containers</p> |ZABBIX_PASSIVE |docker.containers.discovery[false]<p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$DOCKER.LLD.FILTER.CONTAINER.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$DOCKER.LLD.FILTER.CONTAINER.NOT_MATCHES}`</p> |
+|Images discovery |<p>Discovery for images metrics</p> |ZABBIX_PASSIVE |docker.images.discovery<p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$DOCKER.LLD.FILTER.IMAGE.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$DOCKER.LLD.FILTER.IMAGE.NOT_MATCHES}`</p> |
+|Containers discovery |<p>Discovery for containers metrics</p><p>Parameter:</p><p>true - Returns all containers</p><p>false - Returns only running containers</p> |ZABBIX_PASSIVE |docker.containers.discovery[false]<p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$DOCKER.LLD.FILTER.CONTAINER.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$DOCKER.LLD.FILTER.CONTAINER.NOT_MATCHES}`</p> |
## Items collected
@@ -59,19 +59,19 @@ There are no template links in this template.
|Docker |Docker: Containers paused |<p>Total number of containers paused on this host</p> |DEPENDENT |docker.containers.paused<p>**Preprocessing**:</p><p>- JSONPATH: `$.ContainersPaused`</p> |
|Docker |Docker: Images total |<p>Number of images with intermediate image layers</p> |DEPENDENT |docker.images.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.Images`</p> |
|Docker |Docker: Storage driver |<p>Docker storage driver </p><p> https://docs.docker.com/storage/storagedriver/</p> |DEPENDENT |docker.driver<p>**Preprocessing**:</p><p>- JSONPATH: `$.Driver`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Memory limit enabled |<p>-</p> |DEPENDENT |docker.mem_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.MemoryLimit`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Swap limit enabled |<p>-</p> |DEPENDENT |docker.swap_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.SwapLimit`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Kernel memory enabled |<p>-</p> |DEPENDENT |docker.kernel_mem.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.KernelMemory`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Kernel memory TCP enabled |<p>-</p> |DEPENDENT |docker.kernel_mem_tcp.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.KernelMemoryTCP`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: CPU CFS Period enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_cfs_period.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CpuCfsPeriod`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: CPU CFS Quota enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_cfs_quota.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CpuCfsQuota`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: CPU Shares enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_shares.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CPUShares`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: CPU Set enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_set.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CPUSet`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Pids limit enabled |<p>-</p> |DEPENDENT |docker.pids_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.PidsLimit`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: IPv4 Forwarding enabled |<p>-</p> |DEPENDENT |docker.ipv4_forwarding.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.IPv4Forwarding`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Debug enabled |<p>-</p> |DEPENDENT |docker.debug.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.Debug`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Memory limit enabled |<p>-</p> |DEPENDENT |docker.mem_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.MemoryLimit`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Swap limit enabled |<p>-</p> |DEPENDENT |docker.swap_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.SwapLimit`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Kernel memory enabled |<p>-</p> |DEPENDENT |docker.kernel_mem.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.KernelMemory`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Kernel memory TCP enabled |<p>-</p> |DEPENDENT |docker.kernel_mem_tcp.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.KernelMemoryTCP`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: CPU CFS Period enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_cfs_period.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CpuCfsPeriod`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: CPU CFS Quota enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_cfs_quota.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CpuCfsQuota`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: CPU Shares enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_shares.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CPUShares`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: CPU Set enabled |<p>https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler</p> |DEPENDENT |docker.cpu_set.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.CPUSet`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Pids limit enabled |<p>-</p> |DEPENDENT |docker.pids_limit.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.PidsLimit`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: IPv4 Forwarding enabled |<p>-</p> |DEPENDENT |docker.ipv4_forwarding.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.IPv4Forwarding`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Debug enabled |<p>-</p> |DEPENDENT |docker.debug.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.Debug`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Docker: Nfd |<p>Number of used File Descriptors</p> |DEPENDENT |docker.nfd<p>**Preprocessing**:</p><p>- JSONPATH: `$.NFd`</p> |
-|Docker |Docker: OomKill disabled |<p>-</p> |DEPENDENT |docker.oomkill.disabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.OomKillDisable`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: OomKill disabled |<p>-</p> |DEPENDENT |docker.oomkill.disabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.OomKillDisable`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Docker: Goroutines |<p>Number of goroutines</p> |DEPENDENT |docker.goroutines<p>**Preprocessing**:</p><p>- JSONPATH: `$.NGoroutines`</p> |
|Docker |Docker: Logging driver |<p>-</p> |DEPENDENT |docker.logging_driver<p>**Preprocessing**:</p><p>- JSONPATH: `$.LoggingDriver`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Docker: Cgroup driver |<p>-</p> |DEPENDENT |docker.cgroup_driver<p>**Preprocessing**:</p><p>- JSONPATH: `$.CgroupDriver`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
@@ -86,7 +86,7 @@ There are no template links in this template.
|Docker |Docker: Name |<p>-</p> |DEPENDENT |docker.name<p>**Preprocessing**:</p><p>- JSONPATH: `$.Name`</p> |
|Docker |Docker: Server version |<p>-</p> |DEPENDENT |docker.server_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.ServerVersion`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Docker: Default runtime |<p>-</p> |DEPENDENT |docker.default_runtime<p>**Preprocessing**:</p><p>- JSONPATH: `$.DefaultRuntime`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Docker |Docker: Live restore enabled |<p>-</p> |DEPENDENT |docker.live_restore.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.LiveRestoreEnabled`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Docker |Docker: Live restore enabled |<p>-</p> |DEPENDENT |docker.live_restore.enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.LiveRestoreEnabled`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Docker: Layers size |<p>-</p> |DEPENDENT |docker.layers_size<p>**Preprocessing**:</p><p>- JSONPATH: `$.LayersSize`</p> |
|Docker |Docker: Images size |<p>-</p> |DEPENDENT |docker.images_size<p>**Preprocessing**:</p><p>- JSONPATH: `$.Images[*].Size.sum()`</p> |
|Docker |Docker: Containers size |<p>-</p> |DEPENDENT |docker.containers_size<p>**Preprocessing**:</p><p>- JSONPATH: `$.Containers[*].SizeRw.sum()`</p> |
@@ -95,10 +95,10 @@ There are no template links in this template.
|Docker |Image {#NAME}: Created |<p>-</p> |DEPENDENT |docker.image.created["{#ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.Id == "{#ID}")].Created.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Image {#NAME}: Size |<p>-</p> |DEPENDENT |docker.image.size["{#ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.Id == "{#ID}")].Size.first()`</p> |
|Docker |Container {#NAME}: Get stats |<p>Get container stats based on resource usage</p> |ZABBIX_PASSIVE |docker.container_stats["{#NAME}"] |
-|Docker |Container {#NAME}: CPU total usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.total.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.total_usage`</p><p>- CHANGE_PER_SECOND<p>- MULTIPLIER: `1.0E-9`</p> |
+|Docker |Container {#NAME}: CPU total usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.total.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.total_usage`</p><p>- CHANGE_PER_SECOND</p><p>- MULTIPLIER: `1.0E-9`</p> |
|Docker |Container {#NAME}: CPU percent usage |<p>-</p> |DEPENDENT |docker.container_stats.cpu_pct_usage["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.percent_usage`</p> |
-|Docker |Container {#NAME}: CPU kernelmode usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.kernel.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.usage_in_kernelmode`</p><p>- CHANGE_PER_SECOND<p>- MULTIPLIER: `1.0E-9`</p> |
-|Docker |Container {#NAME}: CPU usermode usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.user.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.usage_in_usermode`</p><p>- CHANGE_PER_SECOND<p>- MULTIPLIER: `1.0E-9`</p> |
+|Docker |Container {#NAME}: CPU kernelmode usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.kernel.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.usage_in_kernelmode`</p><p>- CHANGE_PER_SECOND</p><p>- MULTIPLIER: `1.0E-9`</p> |
+|Docker |Container {#NAME}: CPU usermode usage per second |<p>-</p> |DEPENDENT |docker.container_stats.cpu_usage.user.rate["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.cpu_usage.usage_in_usermode`</p><p>- CHANGE_PER_SECOND</p><p>- MULTIPLIER: `1.0E-9`</p> |
|Docker |Container {#NAME}: Online CPUs |<p>-</p> |DEPENDENT |docker.container_stats.online_cpus["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.online_cpus`</p> |
|Docker |Container {#NAME}: Throttling periods |<p>Number of periods with throttling active</p> |DEPENDENT |docker.container_stats.cpu_usage.throttling_periods["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.throttling_data.periods`</p> |
|Docker |Container {#NAME}: Throttled periods |<p>Number of periods when the container hits its throttling limit</p> |DEPENDENT |docker.container_stats.cpu_usage.throttled_periods["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpu_stats.throttling_data.throttled_periods`</p> |
@@ -108,24 +108,24 @@ There are no template links in this template.
|Docker |Container {#NAME}: Memory commit bytes |<p>-</p> |DEPENDENT |docker.container_stats.memory.commit_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.memory_stats.commitbytes`</p> |
|Docker |Container {#NAME}: Memory commit peak bytes |<p>-</p> |DEPENDENT |docker.container_stats.memory.commit_peak_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.memory_stats.commitpeakbytes`</p> |
|Docker |Container {#NAME}: Memory private working set |<p>-</p> |DEPENDENT |docker.container_stats.memory.private_working_set["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.memory_stats.privateworkingset`</p> |
-|Docker |Container {#NAME}: Networks bytes received per second |<p>-</p> |DEPENDENT |docker.networks.rx_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_bytes.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks packets received per second |<p>-</p> |DEPENDENT |docker.networks.rx_packets["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_packets.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks errors received per second |<p>-</p> |DEPENDENT |docker.networks.rx_errors["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_errors.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks incoming packets dropped per second |<p>-</p> |DEPENDENT |docker.networks.rx_dropped["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_dropped.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks bytes sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_bytes.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks packets sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_packets["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_packets.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks errors sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_errors["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_errors.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Docker |Container {#NAME}: Networks outgoing packets dropped per second |<p>-</p> |DEPENDENT |docker.networks.tx_dropped["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_dropped.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
+|Docker |Container {#NAME}: Networks bytes received per second |<p>-</p> |DEPENDENT |docker.networks.rx_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_bytes.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks packets received per second |<p>-</p> |DEPENDENT |docker.networks.rx_packets["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_packets.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks errors received per second |<p>-</p> |DEPENDENT |docker.networks.rx_errors["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_errors.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks incoming packets dropped per second |<p>-</p> |DEPENDENT |docker.networks.rx_dropped["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].rx_dropped.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks bytes sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_bytes["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_bytes.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks packets sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_packets["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_packets.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks errors sent per second |<p>-</p> |DEPENDENT |docker.networks.tx_errors["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_errors.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Docker |Container {#NAME}: Networks outgoing packets dropped per second |<p>-</p> |DEPENDENT |docker.networks.tx_dropped["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.networks[*].tx_dropped.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
|Docker |Container {#NAME}: Get info |<p>Return low-level information about a container</p> |ZABBIX_PASSIVE |docker.container_info["{#NAME}"] |
|Docker |Container {#NAME}: Created |<p>-</p> |DEPENDENT |docker.container_info.created["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.Created`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Container {#NAME}: Image |<p>-</p> |DEPENDENT |docker.container_info.image["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.Names[0] == "{#NAME}")].Image.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Container {#NAME}: Restart count |<p>-</p> |DEPENDENT |docker.container_info.restart_count["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.RestartCount`</p> |
|Docker |Container {#NAME}: Status |<p>-</p> |DEPENDENT |docker.container_info.state.status["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Status`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Docker |Container {#NAME}: Running |<p>-</p> |DEPENDENT |docker.container_info.state.running["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Running`</p><p>- BOOL_TO_DECIMAL |
-|Docker |Container {#NAME}: Paused |<p>-</p> |DEPENDENT |docker.container_info.state.paused["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Paused`</p><p>- BOOL_TO_DECIMAL |
-|Docker |Container {#NAME}: Restarting |<p>-</p> |DEPENDENT |docker.container_info.state.restarting["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Restarting`</p><p>- BOOL_TO_DECIMAL |
-|Docker |Container {#NAME}: OOMKilled |<p>-</p> |DEPENDENT |docker.container_info.state.oomkilled["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.OOMKilled`</p><p>- BOOL_TO_DECIMAL |
-|Docker |Container {#NAME}: Dead |<p>-</p> |DEPENDENT |docker.container_info.state.dead["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Dead`</p><p>- BOOL_TO_DECIMAL |
+|Docker |Container {#NAME}: Running |<p>-</p> |DEPENDENT |docker.container_info.state.running["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Running`</p><p>- BOOL_TO_DECIMAL</p> |
+|Docker |Container {#NAME}: Paused |<p>-</p> |DEPENDENT |docker.container_info.state.paused["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Paused`</p><p>- BOOL_TO_DECIMAL</p> |
+|Docker |Container {#NAME}: Restarting |<p>-</p> |DEPENDENT |docker.container_info.state.restarting["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Restarting`</p><p>- BOOL_TO_DECIMAL</p> |
+|Docker |Container {#NAME}: OOMKilled |<p>-</p> |DEPENDENT |docker.container_info.state.oomkilled["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.OOMKilled`</p><p>- BOOL_TO_DECIMAL</p> |
+|Docker |Container {#NAME}: Dead |<p>-</p> |DEPENDENT |docker.container_info.state.dead["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Dead`</p><p>- BOOL_TO_DECIMAL</p> |
|Docker |Container {#NAME}: Pid |<p>-</p> |DEPENDENT |docker.container_info.state.pid["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Pid`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Container {#NAME}: Exit code |<p>-</p> |DEPENDENT |docker.container_info.state.exitcode["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.ExitCode`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Docker |Container {#NAME}: Error |<p>-</p> |DEPENDENT |docker.container_info.state.error["{#NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.State.Error`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
@@ -140,15 +140,15 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Docker: Service is down |<p>-</p> |`{TEMPLATE_NAME:docker.ping.last()}=0` |AVERAGE |<p>Manual close: YES</p> |
-|Docker: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`{TEMPLATE_NAME:docker.name.nodata(30m)}=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Docker: Service is down</p> |
-|Docker: Version has changed (new version: {ITEM.VALUE}) |<p>Docker version has changed. Ack to close.</p> |`{TEMPLATE_NAME:docker.server_version.diff()}=1 and {TEMPLATE_NAME:docker.server_version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Container {#NAME}: Container has been stopped with error code |<p>-</p> |`{TEMPLATE_NAME:docker.container_info.state.exitcode["{#NAME}"].last()}>0 and {Docker App by Zabbix agent 2:docker.container_info.state.running["{#NAME}"].last()}=0` |AVERAGE |<p>Manual close: YES</p> |
-|Container {#NAME}: An error has occurred in the container |<p>Container {#NAME} has an error. Ack to close.</p> |`{TEMPLATE_NAME:docker.container_info.state.error["{#NAME}"].diff()}=1 and {TEMPLATE_NAME:docker.container_info.state.error["{#NAME}"].strlen()}>0` |WARNING |<p>Manual close: YES</p> |
+|Docker: Service is down |<p>-</p> |`last(/Docker by Zabbix agent 2/docker.ping)=0` |AVERAGE |<p>Manual close: YES</p> |
+|Docker: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/Docker by Zabbix agent 2/docker.name,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Docker: Service is down</p> |
+|Docker: Version has changed (new version: {ITEM.VALUE}) |<p>Docker version has changed. Ack to close.</p> |`last(/Docker by Zabbix agent 2/docker.server_version,#1)<>last(/Docker by Zabbix agent 2/docker.server_version,#2) and length(last(/Docker by Zabbix agent 2/docker.server_version))>0` |INFO |<p>Manual close: YES</p> |
+|Container {#NAME}: Container has been stopped with error code |<p>-</p> |`last(/Docker by Zabbix agent 2/docker.container_info.state.exitcode["{#NAME}"])>0 and last(/Docker by Zabbix agent 2/docker.container_info.state.running["{#NAME}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|Container {#NAME}: An error has occurred in the container |<p>Container {#NAME} has an error. Ack to close.</p> |`last(/Docker by Zabbix agent 2/docker.container_info.state.error["{#NAME}"],#1)<>last(/Docker by Zabbix agent 2/docker.container_info.state.error["{#NAME}"],#2) and length(last(/Docker by Zabbix agent 2/docker.container_info.state.error["{#NAME}"]))>0` |WARNING |<p>Manual close: YES</p> |
## Feedback
Please report any issues with the template at https://support.zabbix.com
-You can also provide a feedback, discuss the template or ask for help with it at [ZABBIX forums]().
+You can also provide feedback, discuss the template or ask for help with it at [ZABBIX forums](https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/435429-discussion-thread-for-official-zabbix-template-docker).
diff --git a/templates/app/docker/template_app_docker.yaml b/templates/app/docker/template_app_docker.yaml
index 4713e9ccfc2..0b341d76824 100644
--- a/templates/app/docker/template_app_docker.yaml
+++ b/templates/app/docker/template_app_docker.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-09-27T07:59:52Z'
+ date: '2021-12-19T15:19:32Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,9 +13,9 @@ zabbix_export:
description: |
Get Docker engine metrics from plugin for the New Zabbix Agent (zabbix-agent2).
- You can discuss this template or leave feedback on our forum
+ You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/435429-discussion-thread-for-official-zabbix-template-docker
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/elasticsearch_http/README.md b/templates/app/elasticsearch_http/README.md
index fddd2f187e8..2446a6261c1 100644
--- a/templates/app/elasticsearch_http/README.md
+++ b/templates/app/elasticsearch_http/README.md
@@ -6,13 +6,12 @@
For Zabbix version: 5.4 and higher
The template to monitor Elasticsearch by Zabbix that works without any external scripts.
It works with both standalone and cluster instances.
-The metrics are collected in one pass remotely using an HTTP agent.
+The metrics are collected in one pass remotely using an HTTP agent.
They are getting values from REST API _cluster/health, _cluster/stats, _nodes/stats requests.
This template was tested on:
-- Zabbix, version 5.0
- Elasticsearch, version 6.5..7.6
## Setup
@@ -29,19 +28,19 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|--------------------------------------------|--------------------------------------------------------------------------------------|---------|
-| {$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN} | <p>Maximum of fetch latency in milliseconds for trigger expression.</p> | `100` |
-| {$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN} | <p>Maximum of flush latency in milliseconds for trigger expression.</p> | `100` |
-| {$ELASTICSEARCH.HEAP_USED.MAX.CRIT} | <p>The maximum percent in the use of JVM heap for critically trigger expression.</p> | `95` |
-| {$ELASTICSEARCH.HEAP_USED.MAX.WARN} | <p>The maximum percent in the use of JVM heap for warning trigger expression.</p> | `85` |
-| {$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN} | <p>Maximum of indexing latency in milliseconds for trigger expression.</p> | `100` |
-| {$ELASTICSEARCH.PASSWORD} | <p>The password of the Elasticsearch.</p> | `` |
-| {$ELASTICSEARCH.PORT} | <p>The port of the Elasticsearch host.</p> | `9200` |
-| {$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN} | <p>Maximum of query latency in milliseconds for trigger expression.</p> | `100` |
-| {$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN} | <p>The ES cluster maximum response time in seconds for trigger expression.</p> | `10s` |
-| {$ELASTICSEARCH.SCHEME} | <p>The scheme of the Elasticsearch (http/https).</p> | `http` |
-| {$ELASTICSEARCH.USERNAME} | <p>The username of the Elasticsearch.</p> | `` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN} |<p>Maximum of fetch latency in milliseconds for trigger expression.</p> |`100` |
+|{$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN} |<p>Maximum of flush latency in milliseconds for trigger expression.</p> |`100` |
+|{$ELASTICSEARCH.HEAP_USED.MAX.CRIT} |<p>The maximum percent in the use of JVM heap for critical trigger expression.</p> |`95` |
+|{$ELASTICSEARCH.HEAP_USED.MAX.WARN} |<p>The maximum percent in the use of JVM heap for warning trigger expression.</p> |`85` |
+|{$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN} |<p>Maximum of indexing latency in milliseconds for trigger expression.</p> |`100` |
+|{$ELASTICSEARCH.PASSWORD} |<p>The password of the Elasticsearch.</p> |`` |
+|{$ELASTICSEARCH.PORT} |<p>The port of the Elasticsearch host.</p> |`9200` |
+|{$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN} |<p>Maximum of query latency in milliseconds for trigger expression.</p> |`100` |
+|{$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN} |<p>The ES cluster maximum response time in seconds for trigger expression.</p> |`10s` |
+|{$ELASTICSEARCH.SCHEME} |<p>The scheme of the Elasticsearch (http/https).</p> |`http` |
+|{$ELASTICSEARCH.USERNAME} |<p>The username of the Elasticsearch.</p> |`` |
## Template links
@@ -49,109 +48,109 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|-------------------------|------------------------------------|------------|-----------------------------------------------------------------------------------------------------------------------|
-| Cluster nodes discovery | <p>Discovery ES cluster nodes.</p> | HTTP_AGENT | es.nodes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.[*]`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Cluster nodes discovery |<p>Discovery ES cluster nodes.</p> |HTTP_AGENT |es.nodes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.[*]`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| ES_cluster | ES: Service status | <p>Checks if the service is running and accepting TCP connections.</p> | SIMPLE | net.tcp.service["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| ES_cluster | ES: Service response time | <p>Checks performance of the TCP service.</p> | SIMPLE | net.tcp.service.perf["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"] |
-| ES_cluster | ES: Cluster health status | <p>Health status of the cluster, based on the state of its primary and replica shards. Statuses are:</p><p>green</p><p>All shards are assigned.</p><p>yellow</p><p>All primary shards are assigned, but one or more replica shards are unassigned. If a node in the cluster fails, some data could be unavailable until that node is repaired.</p><p>red</p><p>One or more primary shards are unassigned, so some data is unavailable. This can occur briefly during cluster startup as primary shards are assigned.</p> | DEPENDENT | es.cluster.status<p>**Preprocessing**:</p><p>- JSONPATH: `$.status`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Number of nodes | <p>The number of nodes within the cluster.</p> | DEPENDENT | es.cluster.number_of_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_nodes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Number of data nodes | <p>The number of nodes that are dedicated to data nodes.</p> | DEPENDENT | es.cluster.number_of_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_data_nodes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Number of relocating shards | <p>The number of shards that are under relocation.</p> | DEPENDENT | es.cluster.relocating_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.relocating_shards`</p> |
-| ES_cluster | ES: Number of initializing shards | <p>The number of shards that are under initialization.</p> | DEPENDENT | es.cluster.initializing_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.initializing_shards`</p> |
-| ES_cluster | ES: Number of unassigned shards | <p>The number of shards that are not allocated.</p> | DEPENDENT | es.cluster.unassigned_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.unassigned_shards`</p> |
-| ES_cluster | ES: Delayed unassigned shards | <p>The number of shards whose allocation has been delayed by the timeout settings.</p> | DEPENDENT | es.cluster.delayed_unassigned_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.delayed_unassigned_shards`</p> |
-| ES_cluster | ES: Number of pending tasks | <p>The number of cluster-level changes that have not yet been executed.</p> | DEPENDENT | es.cluster.number_of_pending_tasks<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_pending_tasks`</p> |
-| ES_cluster | ES: Task max waiting in queue | <p>The time expressed in seconds since the earliest initiated task is waiting for being performed.</p> | DEPENDENT | es.cluster.task_max_waiting_in_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.task_max_waiting_in_queue_millis`</p><p>- MULTIPLIER: `0.001`</p> |
-| ES_cluster | ES: Inactive shards percentage | <p>The ratio of inactive shards in the cluster expressed as a percentage.</p> | DEPENDENT | es.cluster.inactive_shards_percent_as_number<p>**Preprocessing**:</p><p>- JSONPATH: `$.active_shards_percent_as_number`</p><p>- JAVASCRIPT: `return (100 - value)`</p> |
-| ES_cluster | ES: Cluster uptime | <p>Uptime duration in seconds since JVM has last started.</p> | DEPENDENT | es.nodes.jvm.max_uptime[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.jvm.max_uptime_in_millis`</p><p>- MULTIPLIER: `0.001`</p> |
-| ES_cluster | ES: Number of non-deleted documents | <p>The total number of non-deleted documents across all primary shards assigned to the selected nodes.</p><p>This number is based on the documents in Lucene segments and may include the documents from nested fields.</p> | DEPENDENT | es.indices.docs.count<p>**Preprocessing**:</p><p>- JSONPATH: `$.indices.docs.count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Indices with shards assigned to nodes | <p>The total number of indices with shards assigned to the selected nodes.</p> | DEPENDENT | es.indices.count<p>**Preprocessing**:</p><p>- JSONPATH: `$.indices.count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Total size of all file stores | <p>The total size in bytes of all file stores across all selected nodes.</p> | DEPENDENT | es.nodes.fs.total_in_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.fs.total_in_bytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Total available size to JVM in all file stores | <p>The total number of bytes available to JVM in the file stores across all selected nodes.</p><p>Depending on OS or process-level restrictions, this number may be less than nodes.fs.free_in_byes. </p><p>This is the actual amount of free disk space the selected Elasticsearch nodes can use.</p> | DEPENDENT | es.nodes.fs.available_in_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.fs.available_in_bytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Nodes with the data role | <p>The number of selected nodes with the data role.</p> | DEPENDENT | es.nodes.count.data<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.data`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Nodes with the ingest role | <p>The number of selected nodes with the ingest role.</p> | DEPENDENT | es.nodes.count.ingest<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.ingest`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES: Nodes with the master role | <p>The number of selected nodes with the master role.</p> | DEPENDENT | es.nodes.count.master<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.master`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Total size | <p>Total size (in bytes) of all file stores.</p> | DEPENDENT | es.node.fs.total.total_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].fs.total.total_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| ES_cluster | ES {#ES.NODE}: Total available size | <p>The total number of bytes available to this Java virtual machine on all file stores. </p><p>Depending on OS or process level restrictions, this might appear less than fs.total.free_in_bytes. </p><p>This is the actual amount of free disk space the Elasticsearch node can utilize.</p> | DEPENDENT | es.node.fs.total.available_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].fs.total.available_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Node uptime | <p>JVM uptime in seconds.</p> | DEPENDENT | es.node.jvm.uptime[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.uptime_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-| ES_cluster | ES {#ES.NODE}: Maximum JVM memory available for use | <p>The maximum amount of memory, in bytes, available for use by the heap.</p> | DEPENDENT | es.node.jvm.mem.heap_max_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_max_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| ES_cluster | ES {#ES.NODE}: Amount of JVM heap currently in use | <p>The memory, in bytes, currently in use by the heap.</p> | DEPENDENT | es.node.jvm.mem.heap_used_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_used_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Percent of JVM heap currently in use | <p>The percentage of memory currently in use by the heap.</p> | DEPENDENT | es.node.jvm.mem.heap_used_percent[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_used_percent.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Amount of JVM heap committed | <p>The amount of memory, in bytes, available for use by the heap.</p> | DEPENDENT | es.node.jvm.mem.heap_committed_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_committed_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Number of open HTTP connections | <p>The number of currently open HTTP connections for the node.</p> | DEPENDENT | es.node.http.current_open[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].http.current_open.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Rate of HTTP connections opened | <p>The number of HTTP connections opened for the node per second.</p> | DEPENDENT | es.node.http.opened.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].http.total_opened.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Time spent throttling operations | <p>Time in seconds spent throttling operations for the last measuring span.</p> | DEPENDENT | es.node.indices.indexing.throttle_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.throttle_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| ES_cluster | ES {#ES.NODE}: Time spent throttling recovery operations | <p>Time in seconds spent throttling recovery operations for the last measuring span.</p> | DEPENDENT | es.node.indices.recovery.throttle_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.recovery.throttle_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| ES_cluster | ES {#ES.NODE}: Time spent throttling merge operations | <p>Time in seconds spent throttling merge operations for the last measuring span.</p> | DEPENDENT | es.node.indices.merges.total_throttled_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.merges.total_throttled_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| ES_cluster | ES {#ES.NODE}: Rate of queries | <p>The number of query operations per second.</p> | DEPENDENT | es.node.indices.search.query.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_total.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Time spent performing query | <p>Time in seconds spent performing query operations for the last measuring span.</p> | DEPENDENT | es.node.indices.search.query_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| ES_cluster | ES {#ES.NODE}: Query latency | <p>The average query latency calculated by sampling the total number of queries and the total elapsed time at regular intervals.</p> | CALCULATED | es.node.indices.search.query_latency[{#ES.NODE}]<p>**Expression**:</p>`change(es.node.indices.search.query_time_in_millis[{#ES.NODE}]) / ( change(es.node.indices.search.query_total[{#ES.NODE}]) + (change(es.node.indices.search.query_total[{#ES.NODE}]) = 0) ) ` |
-| ES_cluster | ES {#ES.NODE}: Current query operations | <p>The number of query operations currently running.</p> | DEPENDENT | es.node.indices.search.query_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_current.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Rate of fetch | <p>The number of fetch operations per second.</p> | DEPENDENT | es.node.indices.search.fetch.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_total.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Time spent performing fetch | <p>Time in seconds spent performing fetch operations for the last measuring span.</p> | DEPENDENT | es.node.indices.search.fetch_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| ES_cluster | ES {#ES.NODE}: Fetch latency | <p>The average fetch latency calculated by sampling the total number of fetches and the total elapsed time at regular intervals.</p> | CALCULATED | es.node.indices.search.fetch_latency[{#ES.NODE}]<p>**Expression**:</p>`change(es.node.indices.search.fetch_time_in_millis[{#ES.NODE}]) / ( change(es.node.indices.search.fetch_total[{#ES.NODE}]) + (change(es.node.indices.search.fetch_total[{#ES.NODE}]) = 0) )` |
-| ES_cluster | ES {#ES.NODE}: Current fetch operations | <p>The number of fetch operations currently running.</p> | DEPENDENT | es.node.indices.search.fetch_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_current.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Write thread pool executor tasks completed | <p>The number of tasks completed by the write thread pool executor.</p> | DEPENDENT | es.node.thread_pool.write.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.completed.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Write thread pool active threads | <p>The number of active threads in the write thread pool.</p> | DEPENDENT | es.node.thread_pool.write.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.active.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Write thread pool tasks in queue | <p>The number of tasks in queue for the write thread pool.</p> | DEPENDENT | es.node.thread_pool.write.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.queue.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Write thread pool executor tasks rejected | <p>The number of tasks rejected by the write thread pool executor.</p> | DEPENDENT | es.node.thread_pool.write.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.rejected.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Search thread pool executor tasks completed | <p>The number of tasks completed by the search thread pool executor.</p> | DEPENDENT | es.node.thread_pool.search.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.completed.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Search thread pool active threads | <p>The number of active threads in the search thread pool.</p> | DEPENDENT | es.node.thread_pool.search.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.active.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Search thread pool tasks in queue | <p>The number of tasks in queue for the search thread pool.</p> | DEPENDENT | es.node.thread_pool.search.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.queue.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Search thread pool executor tasks rejected | <p>The number of tasks rejected by the search thread pool executor.</p> | DEPENDENT | es.node.thread_pool.search.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.rejected.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Refresh thread pool executor tasks completed | <p>The number of tasks completed by the refresh thread pool executor.</p> | DEPENDENT | es.node.thread_pool.refresh.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.completed.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Refresh thread pool active threads | <p>The number of active threads in the refresh thread pool.</p> | DEPENDENT | es.node.thread_pool.refresh.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.active.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Refresh thread pool tasks in queue | <p>The number of tasks in queue for the refresh thread pool.</p> | DEPENDENT | es.node.thread_pool.refresh.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.queue.first()`</p> |
-| ES_cluster | ES {#ES.NODE}: Refresh thread pool executor tasks rejected | <p>The number of tasks rejected by the refresh thread pool executor.</p> | DEPENDENT | es.node.thread_pool.refresh.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.rejected.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Indexing latency | <p>The average indexing latency calculated from the available index_total and index_time_in_millis metrics.</p> | CALCULATED | es.node.indices.indexing.index_latency[{#ES.NODE}]<p>**Expression**:</p>`change(es.node.indices.indexing.index_time_in_millis[{#ES.NODE}]) / ( change(es.node.indices.indexing.index_total[{#ES.NODE}]) + (change(es.node.indices.indexing.index_total[{#ES.NODE}]) = 0) )` |
-| ES_cluster | ES {#ES.NODE}: Current indexing operations | <p>The number of indexing operations currently running.</p> | DEPENDENT | es.node.indices.indexing.index_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_current.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| ES_cluster | ES {#ES.NODE}: Flush latency | <p>The average flush latency calculated from the available flush.total and flush.total_time_in_millis metrics.</p> | CALCULATED | es.node.indices.flush.latency[{#ES.NODE}]<p>**Expression**:</p>`change(es.node.indices.flush.total_time_in_millis[{#ES.NODE}]) / ( change(es.node.indices.flush.total[{#ES.NODE}]) + (change(es.node.indices.flush.total[{#ES.NODE}]) = 0) )` |
-| ES_cluster | ES {#ES.NODE}: Rate of index refreshes | <p>The number of refresh operations per second.</p> | DEPENDENT | es.node.indices.refresh.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.refresh.total.first()`</p><p>- CHANGE_PER_SECOND |
-| ES_cluster | ES {#ES.NODE}: Time spent performing refresh | <p>Time in seconds spent performing refresh operations for the last measuring span.</p> | DEPENDENT | es.node.indices.refresh.time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.refresh.total_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE |
-| Zabbix_raw_items | ES: Get cluster health | <p>Returns the health status of a cluster.</p> | HTTP_AGENT | es.cluster.get_health |
-| Zabbix_raw_items | ES: Get cluster stats | <p>Returns cluster statistics.</p> | HTTP_AGENT | es.cluster.get_stats |
-| Zabbix_raw_items | ES: Get nodes stats | <p>Returns cluster nodes statistics.</p> | HTTP_AGENT | es.nodes.get_stats |
-| Zabbix_raw_items | ES {#ES.NODE}: Total number of query | <p>The total number of query operations.</p> | DEPENDENT | es.node.indices.search.query_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total time spent performing query | <p>Time in milliseconds spent performing query operations.</p> | DEPENDENT | es.node.indices.search.query_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total number of fetch | <p>The total number of fetch operations.</p> | DEPENDENT | es.node.indices.search.fetch_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total time spent performing fetch | <p>Time in milliseconds spent performing fetch operations.</p> | DEPENDENT | es.node.indices.search.fetch_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total number of indexing | <p>The total number of indexing operations.</p> | DEPENDENT | es.node.indices.indexing.index_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total time spent performing indexing | <p>Total time in milliseconds spent performing indexing operations.</p> | DEPENDENT | es.node.indices.indexing.index_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total number of index flushes to disk | <p>The total number of flush operations.</p> | DEPENDENT | es.node.indices.flush.total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.flush.total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zabbix_raw_items | ES {#ES.NODE}: Total time spent on flushing indices to disk | <p>Total time in milliseconds spent performing flush operations.</p> | DEPENDENT | es.node.indices.flush.total_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.flush.total_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|ES_cluster |ES: Service status |<p>Checks if the service is running and accepting TCP connections.</p> |SIMPLE |net.tcp.service["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|ES_cluster |ES: Service response time |<p>Checks performance of the TCP service.</p> |SIMPLE |net.tcp.service.perf["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"] |
+|ES_cluster |ES: Cluster health status |<p>Health status of the cluster, based on the state of its primary and replica shards. Statuses are:</p><p>green</p><p>All shards are assigned.</p><p>yellow</p><p>All primary shards are assigned, but one or more replica shards are unassigned. If a node in the cluster fails, some data could be unavailable until that node is repaired.</p><p>red</p><p>One or more primary shards are unassigned, so some data is unavailable. This can occur briefly during cluster startup as primary shards are assigned.</p> |DEPENDENT |es.cluster.status<p>**Preprocessing**:</p><p>- JSONPATH: `$.status`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Number of nodes |<p>The number of nodes within the cluster.</p> |DEPENDENT |es.cluster.number_of_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_nodes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Number of data nodes |<p>The number of nodes that are dedicated to data nodes.</p> |DEPENDENT |es.cluster.number_of_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_data_nodes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Number of relocating shards |<p>The number of shards that are under relocation.</p> |DEPENDENT |es.cluster.relocating_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.relocating_shards`</p> |
+|ES_cluster |ES: Number of initializing shards |<p>The number of shards that are under initialization.</p> |DEPENDENT |es.cluster.initializing_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.initializing_shards`</p> |
+|ES_cluster |ES: Number of unassigned shards |<p>The number of shards that are not allocated.</p> |DEPENDENT |es.cluster.unassigned_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.unassigned_shards`</p> |
+|ES_cluster |ES: Delayed unassigned shards |<p>The number of shards whose allocation has been delayed by the timeout settings.</p> |DEPENDENT |es.cluster.delayed_unassigned_shards<p>**Preprocessing**:</p><p>- JSONPATH: `$.delayed_unassigned_shards`</p> |
+|ES_cluster |ES: Number of pending tasks |<p>The number of cluster-level changes that have not yet been executed.</p> |DEPENDENT |es.cluster.number_of_pending_tasks<p>**Preprocessing**:</p><p>- JSONPATH: `$.number_of_pending_tasks`</p> |
+|ES_cluster |ES: Task max waiting in queue |<p>The time, expressed in seconds, that the earliest initiated task has been waiting to be performed.</p> |DEPENDENT |es.cluster.task_max_waiting_in_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.task_max_waiting_in_queue_millis`</p><p>- MULTIPLIER: `0.001`</p> |
+|ES_cluster |ES: Inactive shards percentage |<p>The ratio of inactive shards in the cluster expressed as a percentage.</p> |DEPENDENT |es.cluster.inactive_shards_percent_as_number<p>**Preprocessing**:</p><p>- JSONPATH: `$.active_shards_percent_as_number`</p><p>- JAVASCRIPT: `return (100 - value)`</p> |
+|ES_cluster |ES: Cluster uptime |<p>Uptime duration in seconds since JVM has last started.</p> |DEPENDENT |es.nodes.jvm.max_uptime[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.jvm.max_uptime_in_millis`</p><p>- MULTIPLIER: `0.001`</p> |
+|ES_cluster |ES: Number of non-deleted documents |<p>The total number of non-deleted documents across all primary shards assigned to the selected nodes.</p><p>This number is based on the documents in Lucene segments and may include the documents from nested fields.</p> |DEPENDENT |es.indices.docs.count<p>**Preprocessing**:</p><p>- JSONPATH: `$.indices.docs.count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Indices with shards assigned to nodes |<p>The total number of indices with shards assigned to the selected nodes.</p> |DEPENDENT |es.indices.count<p>**Preprocessing**:</p><p>- JSONPATH: `$.indices.count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Total size of all file stores |<p>The total size in bytes of all file stores across all selected nodes.</p> |DEPENDENT |es.nodes.fs.total_in_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.fs.total_in_bytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Total available size to JVM in all file stores |<p>The total number of bytes available to JVM in the file stores across all selected nodes.</p><p>Depending on OS or process-level restrictions, this number may be less than nodes.fs.free_in_bytes.</p><p>This is the actual amount of free disk space the selected Elasticsearch nodes can use.</p> |DEPENDENT |es.nodes.fs.available_in_bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.fs.available_in_bytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Nodes with the data role |<p>The number of selected nodes with the data role.</p> |DEPENDENT |es.nodes.count.data<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.data`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Nodes with the ingest role |<p>The number of selected nodes with the ingest role.</p> |DEPENDENT |es.nodes.count.ingest<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.ingest`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES: Nodes with the master role |<p>The number of selected nodes with the master role.</p> |DEPENDENT |es.nodes.count.master<p>**Preprocessing**:</p><p>- JSONPATH: `$.nodes.count.master`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Total size |<p>Total size (in bytes) of all file stores.</p> |DEPENDENT |es.node.fs.total.total_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].fs.total.total_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|ES_cluster |ES {#ES.NODE}: Total available size |<p>The total number of bytes available to this Java virtual machine on all file stores.</p><p>Depending on OS or process level restrictions, this might appear less than fs.total.free_in_bytes.</p><p>This is the actual amount of free disk space the Elasticsearch node can utilize.</p> |DEPENDENT |es.node.fs.total.available_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].fs.total.available_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Node uptime |<p>JVM uptime in seconds.</p> |DEPENDENT |es.node.jvm.uptime[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.uptime_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p> |
+|ES_cluster |ES {#ES.NODE}: Maximum JVM memory available for use |<p>The maximum amount of memory, in bytes, available for use by the heap.</p> |DEPENDENT |es.node.jvm.mem.heap_max_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_max_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|ES_cluster |ES {#ES.NODE}: Amount of JVM heap currently in use |<p>The memory, in bytes, currently in use by the heap.</p> |DEPENDENT |es.node.jvm.mem.heap_used_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_used_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Percent of JVM heap currently in use |<p>The percentage of memory currently in use by the heap.</p> |DEPENDENT |es.node.jvm.mem.heap_used_percent[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_used_percent.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Amount of JVM heap committed |<p>The amount of memory, in bytes, available for use by the heap.</p> |DEPENDENT |es.node.jvm.mem.heap_committed_in_bytes[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].jvm.mem.heap_committed_in_bytes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Number of open HTTP connections |<p>The number of currently open HTTP connections for the node.</p> |DEPENDENT |es.node.http.current_open[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].http.current_open.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Rate of HTTP connections opened |<p>The number of HTTP connections opened for the node per second.</p> |DEPENDENT |es.node.http.opened.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].http.total_opened.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent throttling operations |<p>Time in seconds spent throttling operations for the last measuring span.</p> |DEPENDENT |es.node.indices.indexing.throttle_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.throttle_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent throttling recovery operations |<p>Time in seconds spent throttling recovery operations for the last measuring span.</p> |DEPENDENT |es.node.indices.recovery.throttle_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.recovery.throttle_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent throttling merge operations |<p>Time in seconds spent throttling merge operations for the last measuring span.</p> |DEPENDENT |es.node.indices.merges.total_throttled_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.merges.total_throttled_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|ES_cluster |ES {#ES.NODE}: Rate of queries |<p>The number of query operations per second.</p> |DEPENDENT |es.node.indices.search.query.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_total.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent performing query |<p>Time in seconds spent performing query operations for the last measuring span.</p> |DEPENDENT |es.node.indices.search.query_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|ES_cluster |ES {#ES.NODE}: Query latency |<p>The average query latency calculated by sampling the total number of queries and the total elapsed time at regular intervals.</p> |CALCULATED |es.node.indices.search.query_latency[{#ES.NODE}]<p>**Expression**:</p>`change(//es.node.indices.search.query_time_in_millis[{#ES.NODE}]) / ( change(//es.node.indices.search.query_total[{#ES.NODE}]) + (change(//es.node.indices.search.query_total[{#ES.NODE}]) = 0) )` |
+|ES_cluster |ES {#ES.NODE}: Current query operations |<p>The number of query operations currently running.</p> |DEPENDENT |es.node.indices.search.query_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_current.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Rate of fetch |<p>The number of fetch operations per second.</p> |DEPENDENT |es.node.indices.search.fetch.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_total.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent performing fetch |<p>Time in seconds spent performing fetch operations for the last measuring span.</p> |DEPENDENT |es.node.indices.search.fetch_time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|ES_cluster |ES {#ES.NODE}: Fetch latency |<p>The average fetch latency calculated by sampling the total number of fetches and the total elapsed time at regular intervals.</p> |CALCULATED |es.node.indices.search.fetch_latency[{#ES.NODE}]<p>**Expression**:</p>`change(//es.node.indices.search.fetch_time_in_millis[{#ES.NODE}]) / ( change(//es.node.indices.search.fetch_total[{#ES.NODE}]) + (change(//es.node.indices.search.fetch_total[{#ES.NODE}]) = 0) )` |
+|ES_cluster |ES {#ES.NODE}: Current fetch operations |<p>The number of fetch operations currently running.</p> |DEPENDENT |es.node.indices.search.fetch_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_current.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Write thread pool executor tasks completed |<p>The number of tasks completed by the write thread pool executor.</p> |DEPENDENT |es.node.thread_pool.write.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.completed.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Write thread pool active threads |<p>The number of active threads in the write thread pool.</p> |DEPENDENT |es.node.thread_pool.write.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.active.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Write thread pool tasks in queue |<p>The number of tasks in queue for the write thread pool.</p> |DEPENDENT |es.node.thread_pool.write.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.queue.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Write thread pool executor tasks rejected |<p>The number of tasks rejected by the write thread pool executor.</p> |DEPENDENT |es.node.thread_pool.write.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.write.rejected.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Search thread pool executor tasks completed |<p>The number of tasks completed by the search thread pool executor.</p> |DEPENDENT |es.node.thread_pool.search.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.completed.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Search thread pool active threads |<p>The number of active threads in the search thread pool.</p> |DEPENDENT |es.node.thread_pool.search.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.active.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Search thread pool tasks in queue |<p>The number of tasks in queue for the search thread pool.</p> |DEPENDENT |es.node.thread_pool.search.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.queue.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Search thread pool executor tasks rejected |<p>The number of tasks rejected by the search thread pool executor.</p> |DEPENDENT |es.node.thread_pool.search.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.search.rejected.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Refresh thread pool executor tasks completed |<p>The number of tasks completed by the refresh thread pool executor.</p> |DEPENDENT |es.node.thread_pool.refresh.completed.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.completed.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Refresh thread pool active threads |<p>The number of active threads in the refresh thread pool.</p> |DEPENDENT |es.node.thread_pool.refresh.active[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.active.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Refresh thread pool tasks in queue |<p>The number of tasks in queue for the refresh thread pool.</p> |DEPENDENT |es.node.thread_pool.refresh.queue[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.queue.first()`</p> |
+|ES_cluster |ES {#ES.NODE}: Refresh thread pool executor tasks rejected |<p>The number of tasks rejected by the refresh thread pool executor.</p> |DEPENDENT |es.node.thread_pool.refresh.rejected.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].thread_pool.refresh.rejected.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Indexing latency |<p>The average indexing latency calculated from the available index_total and index_time_in_millis metrics.</p> |CALCULATED |es.node.indices.indexing.index_latency[{#ES.NODE}]<p>**Expression**:</p>`change(//es.node.indices.indexing.index_time_in_millis[{#ES.NODE}]) / ( change(//es.node.indices.indexing.index_total[{#ES.NODE}]) + (change(//es.node.indices.indexing.index_total[{#ES.NODE}]) = 0) )` |
+|ES_cluster |ES {#ES.NODE}: Current indexing operations |<p>The number of indexing operations currently running.</p> |DEPENDENT |es.node.indices.indexing.index_current[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_current.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|ES_cluster |ES {#ES.NODE}: Flush latency |<p>The average flush latency calculated from the available flush.total and flush.total_time_in_millis metrics.</p> |CALCULATED |es.node.indices.flush.latency[{#ES.NODE}]<p>**Expression**:</p>`change(//es.node.indices.flush.total_time_in_millis[{#ES.NODE}]) / ( change(//es.node.indices.flush.total[{#ES.NODE}]) + (change(//es.node.indices.flush.total[{#ES.NODE}]) = 0) )` |
+|ES_cluster |ES {#ES.NODE}: Rate of index refreshes |<p>The number of refresh operations per second.</p> |DEPENDENT |es.node.indices.refresh.rate[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.refresh.total.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|ES_cluster |ES {#ES.NODE}: Time spent performing refresh |<p>Time in seconds spent performing refresh operations for the last measuring span.</p> |DEPENDENT |es.node.indices.refresh.time[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.refresh.total_time_in_millis.first()`</p><p>- MULTIPLIER: `0.001`</p><p>- SIMPLE_CHANGE</p> |
+|Zabbix_raw_items |ES: Get cluster health |<p>Returns the health status of a cluster.</p> |HTTP_AGENT |es.cluster.get_health |
+|Zabbix_raw_items |ES: Get cluster stats |<p>Returns cluster statistics.</p> |HTTP_AGENT |es.cluster.get_stats |
+|Zabbix_raw_items |ES: Get nodes stats |<p>Returns cluster nodes statistics.</p> |HTTP_AGENT |es.nodes.get_stats |
+|Zabbix_raw_items |ES {#ES.NODE}: Total number of query |<p>The total number of query operations.</p> |DEPENDENT |es.node.indices.search.query_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total time spent performing query |<p>Time in milliseconds spent performing query operations.</p> |DEPENDENT |es.node.indices.search.query_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.query_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total number of fetch |<p>The total number of fetch operations.</p> |DEPENDENT |es.node.indices.search.fetch_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total time spent performing fetch |<p>Time in milliseconds spent performing fetch operations.</p> |DEPENDENT |es.node.indices.search.fetch_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.search.fetch_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total number of indexing |<p>The total number of indexing operations.</p> |DEPENDENT |es.node.indices.indexing.index_total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total time spent performing indexing |<p>Total time in milliseconds spent performing indexing operations.</p> |DEPENDENT |es.node.indices.indexing.index_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.indexing.index_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total number of index flushes to disk |<p>The total number of flush operations.</p> |DEPENDENT |es.node.indices.flush.total[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.flush.total.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zabbix_raw_items |ES {#ES.NODE}: Total time spent on flushing indices to disk |<p>Total time in milliseconds spent performing flush operations.</p> |DEPENDENT |es.node.indices.flush.total_time_in_millis[{#ES.NODE}]<p>**Preprocessing**:</p><p>- JSONPATH: `$..[?(@.name=='{#ES.NODE}')].indices.flush.total_time_in_millis.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|----------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------|
-| ES: Service is down | <p>The service is unavailable or does not accept TCP connections.</p> | `{TEMPLATE_NAME:net.tcp.service["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| ES: Service response time is too high (over {$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN} for 5m) | <p>The performance of the TCP service is very low.</p> | `{TEMPLATE_NAME:net.tcp.service.perf["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"].min(5m)}>{$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- ES: Service is down</p> |
-| ES: Health is YELLOW | <p>All primary shards are assigned, but one or more replica shards are unassigned. </p><p>If a node in the cluster fails, some data could be unavailable until that node is repaired.</p> | `{TEMPLATE_NAME:es.cluster.status.last()}=1` | AVERAGE | |
-| ES: Health is RED | <p>One or more primary shards are unassigned, so some data is unavailable. </p><p>This can occur briefly during cluster startup as primary shards are assigned.</p> | `{TEMPLATE_NAME:es.cluster.status.last()}=2` | HIGH | |
-| ES: Health is UNKNOWN | <p>The health status of the cluster is unknown or cannot be obtained.</p> | `{TEMPLATE_NAME:es.cluster.status.last()}=255` | HIGH | |
-| ES: The number of nodes within the cluster has decreased | | `{TEMPLATE_NAME:es.cluster.number_of_nodes.change()}<0` | INFO | <p>Manual close: YES</p> |
-| ES: The number of nodes within the cluster has increased | | `{TEMPLATE_NAME:es.cluster.number_of_nodes.change()}>0` | INFO | <p>Manual close: YES</p> |
-| ES: Cluster has the initializing shards | <p>The cluster has the initializing shards longer than 10 minutes.</p> | `{TEMPLATE_NAME:es.cluster.initializing_shards.min(10m)}>0` | AVERAGE | |
-| ES: Cluster has the unassigned shards | <p>The cluster has the unassigned shards longer than 10 minutes.</p> | `{TEMPLATE_NAME:es.cluster.unassigned_shards.min(10m)}>0` | AVERAGE | |
-| ES: Cluster has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:es.nodes.jvm.max_uptime[{#ES.NODE}].last()}<10m` | INFO | <p>Manual close: YES</p> |
-| ES: Cluster does not have enough space for resharding | <p>There is not enough disk space for index resharding.</p> | `({Elasticsearch Cluster by HTTP:es.nodes.fs.total_in_bytes.last()}-{TEMPLATE_NAME:es.nodes.fs.available_in_bytes.last()})/({Elasticsearch Cluster by HTTP:es.cluster.number_of_data_nodes.last()}-1)>{TEMPLATE_NAME:es.nodes.fs.available_in_bytes.last()}` | HIGH | |
-| ES: Cluster has only two master nodes | <p>The cluster has only two nodes with a master role and will be unavailable if one of them breaks.</p> | `{TEMPLATE_NAME:es.nodes.count.master.last()}=2` | DISASTER | |
-| ES {#ES.NODE}: Node {#ES.NODE} has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:es.node.jvm.uptime[{#ES.NODE}].last()}<10m` | INFO | <p>Manual close: YES</p> |
-| ES {#ES.NODE}: Percent of JVM heap in use is high (over {$ELASTICSEARCH.HEAP_USED.MAX.WARN}% for 1h) | <p>This indicates that the rate of garbage collection isn’t keeping up with the rate of garbage creation. </p><p>To address this problem, you can either increase your heap size (as long as it remains below the recommended </p><p>guidelines stated above), or scale out the cluster by adding more nodes.</p> | `{TEMPLATE_NAME:es.node.jvm.mem.heap_used_percent[{#ES.NODE}].min(1h)}>{$ELASTICSEARCH.HEAP_USED.MAX.WARN}` | WARNING | <p>**Depends on**:</p><p>- ES {#ES.NODE}: Percent of JVM heap in use is critical (over {$ELASTICSEARCH.HEAP_USED.MAX.CRIT}% for 1h)</p> |
-| ES {#ES.NODE}: Percent of JVM heap in use is critical (over {$ELASTICSEARCH.HEAP_USED.MAX.CRIT}% for 1h) | <p>This indicates that the rate of garbage collection isn’t keeping up with the rate of garbage creation. </p><p>To address this problem, you can either increase your heap size (as long as it remains below the recommended </p><p>guidelines stated above), or scale out the cluster by adding more nodes.</p> | `{TEMPLATE_NAME:es.node.jvm.mem.heap_used_percent[{#ES.NODE}].min(1h)}>{$ELASTICSEARCH.HEAP_USED.MAX.CRIT}` | HIGH | |
-| ES {#ES.NODE}: Query latency is too high (over {$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN}ms for 5m) | <p>If latency exceeds a threshold, look for potential resource bottlenecks, or investigate whether you need to optimize your queries.</p> | `{TEMPLATE_NAME:es.node.indices.search.query_latency[{#ES.NODE}].min(5m)}>{$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN}` | WARNING | |
-| ES {#ES.NODE}: Fetch latency is too high (over {$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN}ms for 5m) | <p>The fetch phase should typically take much less time than the query phase. If you notice this metric consistently increasing, </p><p>this could indicate a problem with slow disks, enriching of documents (highlighting the relevant text in search results, etc.), </p><p>or requesting too many results.</p> | `{TEMPLATE_NAME:es.node.indices.search.fetch_latency[{#ES.NODE}].min(5m)}>{$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN}` | WARNING | |
-| ES {#ES.NODE}: Write thread pool executor has the rejected tasks (for 5m) | <p>The number of tasks rejected by the write thread pool executor is over 0 for 5m.</p> | `{TEMPLATE_NAME:es.node.thread_pool.write.rejected.rate[{#ES.NODE}].min(5m)}>0` | WARNING | |
-| ES {#ES.NODE}: Search thread pool executor has the rejected tasks (for 5m) | <p>The number of tasks rejected by the search thread pool executor is over 0 for 5m.</p> | `{TEMPLATE_NAME:es.node.thread_pool.search.rejected.rate[{#ES.NODE}].min(5m)}>0` | WARNING | |
-| ES {#ES.NODE}: Refresh thread pool executor has the rejected tasks (for 5m) | <p>The number of tasks rejected by the refresh thread pool executor is over 0 for 5m.</p> | `{TEMPLATE_NAME:es.node.thread_pool.refresh.rejected.rate[{#ES.NODE}].min(5m)}>0` | WARNING | |
-| ES {#ES.NODE}: Indexing latency is too high (over {$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN}ms for 5m) | <p>If the latency is increasing, it may indicate that you are indexing too many documents at the same time (Elasticsearch’s documentation </p><p>recommends starting with a bulk indexing size of 5 to 15 megabytes and increasing slowly from there).</p> | `{TEMPLATE_NAME:es.node.indices.indexing.index_latency[{#ES.NODE}].min(5m)}>{$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN}` | WARNING | |
-| ES {#ES.NODE}: Flush latency is too high (over {$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN}ms for 5m) | <p>If you see this metric increasing steadily, it may indicate a problem with slow disks; this problem may escalate </p><p>and eventually prevent you from being able to add new information to your index.</p> | `{TEMPLATE_NAME:es.node.indices.flush.latency[{#ES.NODE}].min(5m)}>{$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN}` | WARNING | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|ES: Service is down |<p>The service is unavailable or does not accept TCP connections.</p> |`last(/Elasticsearch Cluster by HTTP/net.tcp.service["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|ES: Service response time is too high (over {$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN} for 5m) |<p>The performance of the TCP service is very low.</p> |`min(/Elasticsearch Cluster by HTTP/net.tcp.service.perf["{$ELASTICSEARCH.SCHEME}","{HOST.CONN}","{$ELASTICSEARCH.PORT}"],5m)>{$ELASTICSEARCH.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- ES: Service is down</p> |
+|ES: Health is YELLOW |<p>All primary shards are assigned, but one or more replica shards are unassigned.</p><p>If a node in the cluster fails, some data could be unavailable until that node is repaired.</p> |`last(/Elasticsearch Cluster by HTTP/es.cluster.status)=1` |AVERAGE | |
+|ES: Health is RED |<p>One or more primary shards are unassigned, so some data is unavailable.</p><p>This can occur briefly during cluster startup as primary shards are assigned.</p> |`last(/Elasticsearch Cluster by HTTP/es.cluster.status)=2` |HIGH | |
+|ES: Health is UNKNOWN |<p>The health status of the cluster is unknown or cannot be obtained.</p> |`last(/Elasticsearch Cluster by HTTP/es.cluster.status)=255` |HIGH | |
+|ES: The number of nodes within the cluster has decreased |<p>-</p> |`change(/Elasticsearch Cluster by HTTP/es.cluster.number_of_nodes)<0` |INFO |<p>Manual close: YES</p> |
+|ES: The number of nodes within the cluster has increased |<p>-</p> |`change(/Elasticsearch Cluster by HTTP/es.cluster.number_of_nodes)>0` |INFO |<p>Manual close: YES</p> |
+|ES: Cluster has the initializing shards |<p>The cluster has the initializing shards longer than 10 minutes.</p> |`min(/Elasticsearch Cluster by HTTP/es.cluster.initializing_shards,10m)>0` |AVERAGE | |
+|ES: Cluster has the unassigned shards |<p>The cluster has the unassigned shards longer than 10 minutes.</p> |`min(/Elasticsearch Cluster by HTTP/es.cluster.unassigned_shards,10m)>0` |AVERAGE | |
+|ES: Cluster has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Elasticsearch Cluster by HTTP/es.nodes.jvm.max_uptime[{#ES.NODE}])<10m` |INFO |<p>Manual close: YES</p> |
+|ES: Cluster does not have enough space for resharding |<p>There is not enough disk space for index resharding.</p> |`(last(/Elasticsearch Cluster by HTTP/es.nodes.fs.total_in_bytes)-last(/Elasticsearch Cluster by HTTP/es.nodes.fs.available_in_bytes))/(last(/Elasticsearch Cluster by HTTP/es.cluster.number_of_data_nodes)-1)>last(/Elasticsearch Cluster by HTTP/es.nodes.fs.available_in_bytes)` |HIGH | |
+|ES: Cluster has only two master nodes |<p>The cluster has only two nodes with a master role and will be unavailable if one of them breaks.</p> |`last(/Elasticsearch Cluster by HTTP/es.nodes.count.master)=2` |DISASTER | |
+|ES {#ES.NODE}: Node {#ES.NODE} has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Elasticsearch Cluster by HTTP/es.node.jvm.uptime[{#ES.NODE}])<10m` |INFO |<p>Manual close: YES</p> |
+|ES {#ES.NODE}: Percent of JVM heap in use is high (over {$ELASTICSEARCH.HEAP_USED.MAX.WARN}% for 1h) |<p>This indicates that the rate of garbage collection isn't keeping up with the rate of garbage creation.</p><p>To address this problem, you can either increase your heap size (as long as it remains below the recommended</p><p>guidelines stated above), or scale out the cluster by adding more nodes.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.jvm.mem.heap_used_percent[{#ES.NODE}],1h)>{$ELASTICSEARCH.HEAP_USED.MAX.WARN}` |WARNING |<p>**Depends on**:</p><p>- ES {#ES.NODE}: Percent of JVM heap in use is critical (over {$ELASTICSEARCH.HEAP_USED.MAX.CRIT}% for 1h)</p> |
+|ES {#ES.NODE}: Percent of JVM heap in use is critical (over {$ELASTICSEARCH.HEAP_USED.MAX.CRIT}% for 1h) |<p>This indicates that the rate of garbage collection isn't keeping up with the rate of garbage creation.</p><p>To address this problem, you can either increase your heap size (as long as it remains below the recommended</p><p>guidelines stated above), or scale out the cluster by adding more nodes.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.jvm.mem.heap_used_percent[{#ES.NODE}],1h)>{$ELASTICSEARCH.HEAP_USED.MAX.CRIT}` |HIGH | |
+|ES {#ES.NODE}: Query latency is too high (over {$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN}ms for 5m) |<p>If latency exceeds a threshold, look for potential resource bottlenecks, or investigate whether you need to optimize your queries.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.indices.search.query_latency[{#ES.NODE}],5m)>{$ELASTICSEARCH.QUERY_LATENCY.MAX.WARN}` |WARNING | |
+|ES {#ES.NODE}: Fetch latency is too high (over {$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN}ms for 5m) |<p>The fetch phase should typically take much less time than the query phase. If you notice this metric consistently increasing,</p><p>this could indicate a problem with slow disks, enriching of documents (highlighting the relevant text in search results, etc.),</p><p>or requesting too many results.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.indices.search.fetch_latency[{#ES.NODE}],5m)>{$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN}` |WARNING | |
+|ES {#ES.NODE}: Write thread pool executor has the rejected tasks (for 5m) |<p>The number of tasks rejected by the write thread pool executor is over 0 for 5m.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.thread_pool.write.rejected.rate[{#ES.NODE}],5m)>0` |WARNING | |
+|ES {#ES.NODE}: Search thread pool executor has the rejected tasks (for 5m) |<p>The number of tasks rejected by the search thread pool executor is over 0 for 5m.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.thread_pool.search.rejected.rate[{#ES.NODE}],5m)>0` |WARNING | |
+|ES {#ES.NODE}: Refresh thread pool executor has the rejected tasks (for 5m) |<p>The number of tasks rejected by the refresh thread pool executor is over 0 for 5m.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.thread_pool.refresh.rejected.rate[{#ES.NODE}],5m)>0` |WARNING | |
+|ES {#ES.NODE}: Indexing latency is too high (over {$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN}ms for 5m) |<p>If the latency is increasing, it may indicate that you are indexing too many documents at the same time (Elasticsearch's documentation</p><p>recommends starting with a bulk indexing size of 5 to 15 megabytes and increasing slowly from there).</p> |`min(/Elasticsearch Cluster by HTTP/es.node.indices.indexing.index_latency[{#ES.NODE}],5m)>{$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN}` |WARNING | |
+|ES {#ES.NODE}: Flush latency is too high (over {$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN}ms for 5m) |<p>If you see this metric increasing steadily, it may indicate a problem with slow disks; this problem may escalate</p><p>and eventually prevent you from being able to add new information to your index.</p> |`min(/Elasticsearch Cluster by HTTP/es.node.indices.flush.latency[{#ES.NODE}],5m)>{$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN}` |WARNING | |
## Feedback
diff --git a/templates/app/elasticsearch_http/template_app_elasticsearch_http.yaml b/templates/app/elasticsearch_http/template_app_elasticsearch_http.yaml
index 96ff080fb3a..5a20f653c53 100644
--- a/templates/app/elasticsearch_http/template_app_elasticsearch_http.yaml
+++ b/templates/app/elasticsearch_http/template_app_elasticsearch_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:18Z'
+ date: '2021-12-19T15:19:33Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,7 +13,7 @@ zabbix_export:
description: |
The template to monitor Elasticsearch by Zabbix that work without any external scripts.
It works with both standalone and cluster instances.
- The metrics are collected in one pass remotely using an HTTP agent.
+ The metrics are collected in one pass remotely using an HTTP agent.
They are getting values from REST API _cluster/health, _cluster/stats, _nodes/stats requests.
You can set {$ELASTICSEARCH.USERNAME} and {$ELASTICSEARCH.PASSWORD} macros in the template for using on the host level.
If you use an atypical location ES API, don't forget to change the macros {$ELASTICSEARCH.SCHEME},{$ELASTICSEARCH.PORT}.
@@ -21,7 +21,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/399473-discussion-thread-for-official-zabbix-template-for-elasticsearch
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -59,7 +59,6 @@ zabbix_export:
description: 'Returns the health status of a cluster.'
timeout: 15s
url: '{$ELASTICSEARCH.SCHEME}://{HOST.CONN}:{$ELASTICSEARCH.PORT}/_cluster/health?timeout=5s'
- status_codes: ''
tags:
-
tag: Application
@@ -276,7 +275,7 @@ zabbix_export:
name: 'ES: Health is RED'
priority: HIGH
description: |
- One or more primary shards are unassigned, so some data is unavailable.
+ One or more primary shards are unassigned, so some data is unavailable.
This can occur briefly during cluster startup as primary shards are assigned.
-
uuid: 33a6ef13f7b240768841919482709411
@@ -290,7 +289,7 @@ zabbix_export:
name: 'ES: Health is YELLOW'
priority: AVERAGE
description: |
- All primary shards are assigned, but one or more replica shards are unassigned.
+ All primary shards are assigned, but one or more replica shards are unassigned.
If a node in the cluster fails, some data could be unavailable until that node is repaired.
-
uuid: 2295e2ba3d4949feb3519ac85ba6ff86
@@ -477,7 +476,7 @@ zabbix_export:
units: B
description: |
The total number of bytes available to JVM in the file stores across all selected nodes.
- Depending on OS or process-level restrictions, this number may be less than nodes.fs.free_in_byes.
+ Depending on OS or process-level restrictions, this number may be less than nodes.fs.free_in_byes.
This is the actual amount of free disk space the selected Elasticsearch nodes can use.
preprocessing:
-
@@ -641,8 +640,8 @@ zabbix_export:
history: 7d
units: B
description: |
- The total number of bytes available to this Java virtual machine on all file stores.
- Depending on OS or process level restrictions, this might appear less than fs.total.free_in_bytes.
+ The total number of bytes available to this Java virtual machine on all file stores.
+ Depending on OS or process level restrictions, this might appear less than fs.total.free_in_bytes.
This is the actual amount of free disk space the Elasticsearch node can utilize.
preprocessing:
-
@@ -752,7 +751,7 @@ zabbix_export:
name: 'ES {#ES.NODE}: Flush latency is too high (over {$ELASTICSEARCH.FLUSH_LATENCY.MAX.WARN}ms for 5m)'
priority: WARNING
description: |
- If you see this metric increasing steadily, it may indicate a problem with slow disks; this problem may escalate
+ If you see this metric increasing steadily, it may indicate a problem with slow disks; this problem may escalate
and eventually prevent you from being able to add new information to your index.
-
uuid: 8e270dfff9c84d2a96a134dd6d86533b
@@ -845,7 +844,7 @@ zabbix_export:
name: 'ES {#ES.NODE}: Indexing latency is too high (over {$ELASTICSEARCH.INDEXING_LATENCY.MAX.WARN}ms for 5m)'
priority: WARNING
description: |
- If the latency is increasing, it may indicate that you are indexing too many documents at the same time (Elasticsearch’s documentation
+ If the latency is increasing, it may indicate that you are indexing too many documents at the same time (Elasticsearch's documentation
recommends starting with a bulk indexing size of 5 to 15 megabytes and increasing slowly from there).
-
uuid: f471dad45ff149b09a479963cb616fc2
@@ -1100,8 +1099,8 @@ zabbix_export:
name: 'ES {#ES.NODE}: Fetch latency is too high (over {$ELASTICSEARCH.FETCH_LATENCY.MAX.WARN}ms for 5m)'
priority: WARNING
description: |
- The fetch phase should typically take much less time than the query phase. If you notice this metric consistently increasing,
- this could indicate a problem with slow disks, enriching of documents (highlighting the relevant text in search results, etc.),
+ The fetch phase should typically take much less time than the query phase. If you notice this metric consistently increasing,
+ this could indicate a problem with slow disks, enriching of documents (highlighting the relevant text in search results, etc.),
or requesting too many results.
-
uuid: a10e7dca72c8411a9b7fdcbeb676017e
@@ -1426,8 +1425,8 @@ zabbix_export:
name: 'ES {#ES.NODE}: Percent of JVM heap in use is critical (over {$ELASTICSEARCH.HEAP_USED.MAX.CRIT}% for 1h)'
priority: HIGH
description: |
- This indicates that the rate of garbage collection isn’t keeping up with the rate of garbage creation.
- To address this problem, you can either increase your heap size (as long as it remains below the recommended
+ This indicates that the rate of garbage collection isn't keeping up with the rate of garbage creation.
+ To address this problem, you can either increase your heap size (as long as it remains below the recommended
guidelines stated above), or scale out the cluster by adding more nodes.
-
uuid: bbba4a577a2c4328b2392fdeb1ff9bb4
@@ -1435,8 +1434,8 @@ zabbix_export:
name: 'ES {#ES.NODE}: Percent of JVM heap in use is high (over {$ELASTICSEARCH.HEAP_USED.MAX.WARN}% for 1h)'
priority: WARNING
description: |
- This indicates that the rate of garbage collection isn’t keeping up with the rate of garbage creation.
- To address this problem, you can either increase your heap size (as long as it remains below the recommended
+ This indicates that the rate of garbage collection isn't keeping up with the rate of garbage creation.
+ To address this problem, you can either increase your heap size (as long as it remains below the recommended
guidelines stated above), or scale out the cluster by adding more nodes.
dependencies:
-
diff --git a/templates/app/etcd_http/README.md b/templates/app/etcd_http/README.md
index 84855d373a3..6795e1a96c7 100644
--- a/templates/app/etcd_http/README.md
+++ b/templates/app/etcd_http/README.md
@@ -45,18 +45,18 @@ No specific Zabbix configuration is required.
|Name|Description|Default|
|----|-----------|-------|
-|{$ETCD.GRPC.ERRORS.MAX.WARN} |<p>Maximum number of gRPC requests failures</p> |`1` |
-|{$ETCD.GRPC_CODE.MATCHES} |<p>Filter of discoverable gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md</p> |`.*` |
-|{$ETCD.GRPC_CODE.NOT_MATCHES} |<p>Filter to exclude discovered gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md</p> |`CHANGE_IF_NEEDED` |
-|{$ETCD.GRPC_CODE.TRIGGER.MATCHES} |<p>Filter of discoverable gRPC codes which will be create triggers</p> |`Aborted|Unavailable` |
-|{$ETCD.HTTP.FAIL.MAX.WARN} |<p>Maximum number of HTTP requests failures</p> |`2` |
-|{$ETCD.LEADER.CHANGES.MAX.WARN} |<p>Maximum number of leader changes</p> |`5` |
-|{$ETCD.OPEN.FDS.MAX.WARN} |<p>Maximum percentage of used file descriptors</p> |`90` |
+|{$ETCD.GRPC.ERRORS.MAX.WARN} |<p>Maximum number of gRPC request failures.</p> |`1` |
+|{$ETCD.GRPC_CODE.MATCHES} |<p>Filter of discoverable gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.</p> |`.*` |
+|{$ETCD.GRPC_CODE.NOT_MATCHES} |<p>Filter to exclude discovered gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.</p> |`CHANGE_IF_NEEDED` |
+|{$ETCD.GRPC_CODE.TRIGGER.MATCHES} |<p>Filter of discoverable gRPC codes which will create triggers.</p> |`Aborted|Unavailable` |
+|{$ETCD.HTTP.FAIL.MAX.WARN} |<p>Maximum number of HTTP request failures.</p> |`2` |
+|{$ETCD.LEADER.CHANGES.MAX.WARN} |<p>Maximum number of leader changes.</p> |`5` |
+|{$ETCD.OPEN.FDS.MAX.WARN} |<p>Maximum percentage of used file descriptors.</p> |`90` |
|{$ETCD.PASSWORD} |<p>-</p> |`` |
-|{$ETCD.PORT} |<p>The port of Etcd API endpoint</p> |`2379` |
-|{$ETCD.PROPOSAL.FAIL.MAX.WARN} |<p>Maximum number of proposal failures</p> |`2` |
-|{$ETCD.PROPOSAL.PENDING.MAX.WARN} |<p>Maximum number of proposals in queue</p> |`5` |
-|{$ETCD.SCHEME} |<p>Request scheme which may be http or https</p> |`http` |
+|{$ETCD.PORT} |<p>The port of the Etcd API endpoint.</p> |`2379` |
+|{$ETCD.PROPOSAL.FAIL.MAX.WARN} |<p>Maximum number of proposal failures.</p> |`2` |
+|{$ETCD.PROPOSAL.PENDING.MAX.WARN} |<p>Maximum number of proposals in queue.</p> |`5` |
+|{$ETCD.SCHEME} |<p>Request scheme which may be http or https.</p> |`http` |
|{$ETCD.USER} |<p>-</p> |`` |
## Template links
@@ -75,46 +75,46 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
|Etcd |Etcd: Service's TCP port state |<p>-</p> |SIMPLE |net.tcp.service["{$ETCD.SCHEME}","{HOST.CONN}","{$ETCD.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|Etcd |Etcd: Node health |<p>-</p> |HTTP_AGENT |etcd.health<p>**Preprocessing**:</p><p>- JSONPATH: `$.health`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|Etcd |Etcd: Server is a leader |<p>Whether or not this member is a leader. 1 if is, 0 otherwise.</p> |DEPENDENT |etcd.is.leader<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_is_leader `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|Etcd |Etcd: Server has a leader |<p>Whether or not a leader exists. 1 is existence, 0 is not.</p> |DEPENDENT |etcd.has.leader<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_has_leader `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|Etcd |Etcd: Leader changes |<p>The the number of leader changes the member has seen since its start.</p> |DEPENDENT |etcd.leader.changes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_leader_changes_seen_total `</p> |
-|Etcd |Etcd: Proposals committed per second |<p>The number of consensus proposals committed.</p> |DEPENDENT |etcd.proposals.committed.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_committed_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Proposals applied per second |<p>The number of consensus proposals applied.</p> |DEPENDENT |etcd.proposals.applied.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_applied_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Proposals failed per second |<p>The number of failed proposals seen.</p> |DEPENDENT |etcd.proposals.failed.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_failed_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Proposals pending |<p>The current number of pending proposals to commit.</p> |DEPENDENT |etcd.proposals.pending<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_pending `</p> |
-|Etcd |Etcd: Reads per second |<p>Number of reads action by (get/getRecursive), local to this member.</p> |DEPENDENT |etcd.reads.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_debugging_store_reads_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Writes per second |<p>Number of writes (e.g. set/compareAndDelete) seen by this member.</p> |DEPENDENT |etcd.writes.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_debugging_store_writes_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Client gRPC received bytes per second |<p>The number of bytes received from grpc clients per second</p> |DEPENDENT |etcd.network.grpc.received.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_client_grpc_received_bytes_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Client gRPC sent bytes per second |<p>The number of bytes sent from grpc clients per second</p> |DEPENDENT |etcd.network.grpc.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_client_grpc_sent_bytes_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: HTTP requests received |<p>Number of requests received into the system (successfully parsed and authd).</p> |DEPENDENT |etcd.http.requests.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_received_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: HTTP 5XX |<p>Number of handle failures of requests (non-watches), by method (GET/PUT etc.), and code 5XX.</p> |DEPENDENT |etcd.http.requests.5xx.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_failed_total{code=~"5.+"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: HTTP 4XX |<p>Number of handle failures of requests (non-watches), by method (GET/PUT etc.), and code 4XX.</p> |DEPENDENT |etcd.http.requests.4xx.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_failed_total{code=~"4.+"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: RPCs received per second |<p>The number of RPC stream messages received on the server.</p> |DEPENDENT |etcd.grpc.received.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_msg_received_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: RPCs sent per second |<p>The number of gRPC stream messages sent by the server.</p> |DEPENDENT |etcd.grpc.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_msg_sent_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: RPCs started per second |<p>The number of RPCs started on the server.</p> |DEPENDENT |etcd.grpc.started.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_started_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
+|Etcd |Etcd: Node health |<p>-</p> |HTTP_AGENT |etcd.health<p>**Preprocessing**:</p><p>- JSONPATH: `$.health`</p><p>- BOOL_TO_DECIMAL</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Etcd |Etcd: Server is a leader |<p>Whether or not this member is a leader. 1 if is, 0 otherwise.</p> |DEPENDENT |etcd.is.leader<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_is_leader`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Etcd |Etcd: Server has a leader |<p>Whether or not a leader exists. 1 is existence, 0 is not.</p> |DEPENDENT |etcd.has.leader<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_has_leader`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Etcd |Etcd: Leader changes |<p>The number of leader changes the member has seen since its start.</p> |DEPENDENT |etcd.leader.changes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_leader_changes_seen_total`</p> |
+|Etcd |Etcd: Proposals committed per second |<p>The number of consensus proposals committed.</p> |DEPENDENT |etcd.proposals.committed.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_committed_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Proposals applied per second |<p>The number of consensus proposals applied.</p> |DEPENDENT |etcd.proposals.applied.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_applied_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Proposals failed per second |<p>The number of failed proposals seen.</p> |DEPENDENT |etcd.proposals.failed.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_failed_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Proposals pending |<p>The current number of pending proposals to commit.</p> |DEPENDENT |etcd.proposals.pending<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_server_proposals_pending`</p> |
+|Etcd |Etcd: Reads per second |<p>Number of reads action by (get/getRecursive), local to this member.</p> |DEPENDENT |etcd.reads.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_debugging_store_reads_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Writes per second |<p>Number of writes (e.g. set/compareAndDelete) seen by this member.</p> |DEPENDENT |etcd.writes.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_debugging_store_writes_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Client gRPC received bytes per second |<p>The number of bytes received from grpc clients per second.</p> |DEPENDENT |etcd.network.grpc.received.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_client_grpc_received_bytes_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Client gRPC sent bytes per second |<p>The number of bytes sent from grpc clients per second.</p> |DEPENDENT |etcd.network.grpc.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_client_grpc_sent_bytes_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: HTTP requests received |<p>Number of requests received into the system (successfully parsed and authd).</p> |DEPENDENT |etcd.http.requests.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_received_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: HTTP 5XX |<p>Number of handle failures of requests (non-watches), by method (GET/PUT etc.), and code 5XX.</p> |DEPENDENT |etcd.http.requests.5xx.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_failed_total{code=~"5.+"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: HTTP 4XX |<p>Number of handle failures of requests (non-watches), by method (GET/PUT etc.), and code 4XX.</p> |DEPENDENT |etcd.http.requests.4xx.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `etcd_http_failed_total{code=~"4.+"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: RPCs received per second |<p>The number of RPC stream messages received on the server.</p> |DEPENDENT |etcd.grpc.received.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_msg_received_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: RPCs sent per second |<p>The number of gRPC stream messages sent by the server.</p> |DEPENDENT |etcd.grpc.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_msg_sent_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: RPCs started per second |<p>The number of RPCs started on the server.</p> |DEPENDENT |etcd.grpc.started.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_started_total`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
|Etcd |Etcd: Server version |<p>Version of the Etcd server.</p> |DEPENDENT |etcd.server.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.etcdserver`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Etcd |Etcd: Cluster version |<p>Version of the Etcd cluster.</p> |DEPENDENT |etcd.cluster.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.etcdcluster`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Etcd |Etcd: DB size |<p>Total size of the underlying database.</p> |DEPENDENT |etcd.db.size<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_db_total_size_in_bytes `</p> |
-|Etcd |Etcd: Keys compacted per second |<p>The number of DB keys compacted per second.</p> |DEPENDENT |etcd.keys.compacted.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_db_compaction_keys_total `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Keys expired per second |<p>The number of expired keys per second.</p> |DEPENDENT |etcd.keys.expired.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_store_expires_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Keys total |<p>Total number of keys.</p> |DEPENDENT |etcd.keys.total<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_keys_total `</p> |
-|Etcd |Etcd: Uptime |<p>Etcd server uptime.</p> |DEPENDENT |etcd.uptime<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_start_time_seconds `</p><p>- JAVASCRIPT: `//use boottime to calculate uptime return (Math.floor(Date.now()/1000)-Number(value));`</p> |
-|Etcd |Etcd: Virtual memory |<p>Virtual memory size in bytes.</p> |DEPENDENT |etcd.virtual.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_bytes `</p> |
-|Etcd |Etcd: Resident memory |<p>Resident memory size in bytes.</p> |DEPENDENT |etcd.res.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_resident_memory_bytes `</p> |
-|Etcd |Etcd: CPU |<p>Total user and system CPU time spent in seconds.</p> |DEPENDENT |etcd.cpu.util<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_cpu_seconds_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Open file descriptors |<p>Number of open file descriptors.</p> |DEPENDENT |etcd.open.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_open_fds `</p> |
-|Etcd |Etcd: Maximum open file descriptors |<p>The Maximum number of open file descriptors.</p> |DEPENDENT |etcd.max.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_max_fds `</p> |
-|Etcd |Etcd: Deletes per second |<p>The number of deletes seen by this member per second.</p> |DEPENDENT |etcd.delete.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_delete_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: PUT per second |<p>The number of puts seen by this member per second.</p> |DEPENDENT |etcd.put.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_put_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Range per second |<p>The number of ranges seen by this member per second.</p> |DEPENDENT |etcd.range.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_range_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Transaction per second |<p>The number of transactions seen by this member per second.</p> |DEPENDENT |etcd.txn.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_range_total `</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Pending events |<p>Total number of pending events to be sent.</p> |DEPENDENT |etcd.events.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_pending_events_total `</p> |
-|Etcd |Etcd: RPCs completed with code {#GRPC.CODE} |<p>The number of RPCs completed on the server with grpc_code {#GRPC.CODE}</p> |DEPENDENT |etcd.grpc.handled.rate[{#GRPC.CODE}]<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_handled_total{grpc_method="{#GRPC.CODE}"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Bytes sent |<p>The number of bytes sent to peer with ID {#ETCD.PEER}</p> |DEPENDENT |etcd.bytes.sent.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_sent_bytes_total{To="{#ETCD.PEER}"} `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Bytes received |<p>The number of bytes received from peer with ID {#ETCD.PEER}</p> |DEPENDENT |etcd.bytes.received.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_received_bytes_total{From="{#ETCD.PEER}"} `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Send failures |<p>The number of send failures from peer with ID {#ETCD.PEER}</p> |DEPENDENT |etcd.sent.fail.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_sent_failures_total{To="{#ETCD.PEER}"} `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Receive failures failures |<p>The number of receive failures from the peer with ID {#ETCD.PEER}</p> |DEPENDENT |etcd.received.fail.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_received_failures_total{To="{#ETCD.PEER}"} `</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
+|Etcd |Etcd: DB size |<p>Total size of the underlying database.</p> |DEPENDENT |etcd.db.size<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_db_total_size_in_bytes`</p> |
+|Etcd |Etcd: Keys compacted per second |<p>The number of DB keys compacted per second.</p> |DEPENDENT |etcd.keys.compacted.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_db_compaction_keys_total`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Keys expired per second |<p>The number of expired keys per second.</p> |DEPENDENT |etcd.keys.expired.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_store_expires_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Keys total |<p>Total number of keys.</p> |DEPENDENT |etcd.keys.total<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_keys_total`</p> |
+|Etcd |Etcd: Uptime |<p>Etcd server uptime.</p> |DEPENDENT |etcd.uptime<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_start_time_seconds`</p><p>- JAVASCRIPT: `//use boottime to calculate uptime return (Math.floor(Date.now()/1000)-Number(value)); `</p> |
+|Etcd |Etcd: Virtual memory |<p>Virtual memory size in bytes.</p> |DEPENDENT |etcd.virtual.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_bytes`</p> |
+|Etcd |Etcd: Resident memory |<p>Resident memory size in bytes.</p> |DEPENDENT |etcd.res.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_resident_memory_bytes`</p> |
+|Etcd |Etcd: CPU |<p>Total user and system CPU time spent in seconds.</p> |DEPENDENT |etcd.cpu.util<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_cpu_seconds_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Open file descriptors |<p>Number of open file descriptors.</p> |DEPENDENT |etcd.open.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_open_fds`</p> |
+|Etcd |Etcd: Maximum open file descriptors |<p>The Maximum number of open file descriptors.</p> |DEPENDENT |etcd.max.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_max_fds`</p> |
+|Etcd |Etcd: Deletes per second |<p>The number of deletes seen by this member per second.</p> |DEPENDENT |etcd.delete.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_delete_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: PUT per second |<p>The number of puts seen by this member per second.</p> |DEPENDENT |etcd.put.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_put_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Range per second |<p>The number of ranges seen by this member per second.</p> |DEPENDENT |etcd.range.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_range_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Transaction per second |<p>The number of transactions seen by this member per second.</p> |DEPENDENT |etcd.txn.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_range_total`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Pending events |<p>Total number of pending events to be sent.</p> |DEPENDENT |etcd.events.sent.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_debugging_mvcc_pending_events_total`</p> |
+|Etcd |Etcd: RPCs completed with code {#GRPC.CODE} |<p>The number of RPCs completed on the server with grpc_code {#GRPC.CODE}.</p> |DEPENDENT |etcd.grpc.handled.rate[{#GRPC.CODE}]<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `grpc_server_handled_total{grpc_method="{#GRPC.CODE}"}`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Bytes sent |<p>The number of bytes sent to peer with ID {#ETCD.PEER}.</p> |DEPENDENT |etcd.bytes.sent.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_sent_bytes_total{To="{#ETCD.PEER}"}`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Bytes received |<p>The number of bytes received from peer with ID {#ETCD.PEER}.</p> |DEPENDENT |etcd.bytes.received.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_received_bytes_total{From="{#ETCD.PEER}"}`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Send failures |<p>The number of send failures from peer with ID {#ETCD.PEER}.</p> |DEPENDENT |etcd.sent.fail.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_sent_failures_total{To="{#ETCD.PEER}"}`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Etcd |Etcd: Etcd peer {#ETCD.PEER}: Receive failures failures |<p>The number of receive failures from the peer with ID {#ETCD.PEER}.</p> |DEPENDENT |etcd.received.fail.rate[{#ETCD.PEER}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `etcd_network_peer_received_failures_total{To="{#ETCD.PEER}"}`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
|Zabbix_raw_items |Etcd: Get node metrics |<p>-</p> |HTTP_AGENT |etcd.get_metrics |
|Zabbix_raw_items |Etcd: Get version |<p>-</p> |HTTP_AGENT |etcd.get_version |
@@ -124,16 +124,16 @@ There are no template links in this template.
|----|-----------|----|----|----|
|Etcd: Service is unavailable |<p>-</p> |`last(/Etcd by HTTP/net.tcp.service["{$ETCD.SCHEME}","{HOST.CONN}","{$ETCD.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
|Etcd: Node healthcheck failed |<p>https://etcd.io/docs/v3.4.0/op-guide/monitoring/#health-check</p> |`last(/Etcd by HTTP/etcd.health)=0` |AVERAGE |<p>**Depends on**:</p><p>- Etcd: Service is unavailable</p> |
-|Etcd: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/Etcd by HTTP/etcd.is.leader,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Etcd: Service is unavailable</p> |
-|Etcd: Member has no leader |<p>"If a member does not have a leader, it is totally unavailable."</p> |`last(/Etcd by HTTP/etcd.has.leader)=0` |AVERAGE | |
+|Etcd: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Etcd by HTTP/etcd.is.leader,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Etcd: Service is unavailable</p> |
+|Etcd: Member has no leader |<p>If a member does not have a leader, it is totally unavailable.</p> |`last(/Etcd by HTTP/etcd.has.leader)=0` |AVERAGE | |
|Etcd: Instance has seen too many leader changes (over {$ETCD.LEADER.CHANGES.MAX.WARN} for 15m)' |<p>Rapid leadership changes impact the performance of etcd significantly. It also signals that the leader is unstable, perhaps due to network connectivity issues or excessive load hitting the etcd cluster.</p> |`(max(/Etcd by HTTP/etcd.leader.changes,15m)-min(/Etcd by HTTP/etcd.leader.changes,15m))>{$ETCD.LEADER.CHANGES.MAX.WARN}` |WARNING | |
|Etcd: Too many proposal failures (over {$ETCD.PROPOSAL.FAIL.MAX.WARN} for 5m)' |<p>Normally related to two issues: temporary failures related to a leader election or longer downtime caused by a loss of quorum in the cluster.</p> |`min(/Etcd by HTTP/etcd.proposals.failed.rate,5m)>{$ETCD.PROPOSAL.FAIL.MAX.WARN}` |WARNING | |
-|Etcd: Too many proposals are queued to commit (over {$ETCD.PROPOSAL.PENDING.MAX.WARN} for 5m)' |<p>"Rising pending proposals suggests there is a high client load or the member cannot commit proposals."</p> |`min(/Etcd by HTTP/etcd.proposals.pending,5m)>{$ETCD.PROPOSAL.PENDING.MAX.WARN}` |WARNING | |
-|Etcd: Too many HTTP requests failures (over {$ETCD.HTTP.FAIL.MAX.WARN} for 5m)' |<p>"Too many reqvests failed on etcd instance with 5xx HTTP code"</p> |`min(/Etcd by HTTP/etcd.http.requests.5xx.rate,5m)>{$ETCD.HTTP.FAIL.MAX.WARN}` |WARNING | |
+|Etcd: Too many proposals are queued to commit (over {$ETCD.PROPOSAL.PENDING.MAX.WARN} for 5m)' |<p>Rising pending proposals suggests there is a high client load or the member cannot commit proposals.</p> |`min(/Etcd by HTTP/etcd.proposals.pending,5m)>{$ETCD.PROPOSAL.PENDING.MAX.WARN}` |WARNING | |
+|Etcd: Too many HTTP requests failures (over {$ETCD.HTTP.FAIL.MAX.WARN} for 5m)' |<p>Too many requests failed on etcd instance with 5xx HTTP code.</p> |`min(/Etcd by HTTP/etcd.http.requests.5xx.rate,5m)>{$ETCD.HTTP.FAIL.MAX.WARN}` |WARNING | |
|Etcd: Server version has changed (new version: {ITEM.VALUE}) |<p>Etcd version has changed. Ack to close.</p> |`last(/Etcd by HTTP/etcd.server.version,#1)<>last(/Etcd by HTTP/etcd.server.version,#2) and length(last(/Etcd by HTTP/etcd.server.version))>0` |INFO |<p>Manual close: YES</p> |
|Etcd: Cluster version has changed (new version: {ITEM.VALUE}) |<p>Etcd version has changed. Ack to close.</p> |`last(/Etcd by HTTP/etcd.cluster.version,#1)<>last(/Etcd by HTTP/etcd.cluster.version,#2) and length(last(/Etcd by HTTP/etcd.cluster.version))>0` |INFO |<p>Manual close: YES</p> |
|Etcd: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Etcd by HTTP/etcd.uptime)<10m` |INFO |<p>Manual close: YES</p> |
-|Etcd: Current number of open files is too high (over {$ETCD.OPEN.FDS.MAX.WARN}% for 5m) |<p>Heavy file descriptor usage (i.e., near the process’s file descriptor limit) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.</p> |`min(/Etcd by HTTP/etcd.open.fds,5m)/last(/Etcd by HTTP/etcd.max.fds)*100>{$ETCD.OPEN.FDS.MAX.WARN}` |WARNING | |
+|Etcd: Current number of open files is too high (over {$ETCD.OPEN.FDS.MAX.WARN}% for 5m) |<p>Heavy file descriptor usage (i.e., near the process's file descriptor limit) indicates a potential file descriptor exhaustion issue.</p><p>If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.</p> |`min(/Etcd by HTTP/etcd.open.fds,5m)/last(/Etcd by HTTP/etcd.max.fds)*100>{$ETCD.OPEN.FDS.MAX.WARN}` |WARNING | |
|Etcd: Too many failed gRPC requests with code: {#GRPC.CODE} (over {$ETCD.GRPC.ERRORS.MAX.WARN} in 5m) |<p>-</p> |`min(/Etcd by HTTP/etcd.grpc.handled.rate[{#GRPC.CODE}],5m)>{$ETCD.GRPC.ERRORS.MAX.WARN}` |WARNING | |
## Feedback
diff --git a/templates/app/etcd_http/template_app_etcd_http.yaml b/templates/app/etcd_http/template_app_etcd_http.yaml
index 72fcae2eec8..335ca8a08d9 100644
--- a/templates/app/etcd_http/template_app_etcd_http.yaml
+++ b/templates/app/etcd_http/template_app_etcd_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-12-09T12:22:05Z'
+ date: '2021-12-19T15:19:34Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -305,7 +305,7 @@ zabbix_export:
expression: 'last(/Etcd by HTTP/etcd.has.leader)=0'
name: 'Etcd: Member has no leader'
priority: AVERAGE
- description: '"If a member does not have a leader, it is totally unavailable."'
+ description: 'If a member does not have a leader, it is totally unavailable.'
-
uuid: 3fde4db8b9684ba4b56ba915e48957b5
name: 'Etcd: Node health'
@@ -421,7 +421,7 @@ zabbix_export:
expression: 'min(/Etcd by HTTP/etcd.http.requests.5xx.rate,5m)>{$ETCD.HTTP.FAIL.MAX.WARN}'
name: 'Etcd: Too many HTTP requests failures (over {$ETCD.HTTP.FAIL.MAX.WARN} for 5m)'''
priority: WARNING
- description: '"Too many reqvests failed on etcd instance with 5xx HTTP code"'
+ description: 'Too many requests failed on etcd instance with 5xx HTTP code.'
-
uuid: 2a19db1c58ee4a509061fcb1b557c1a3
name: 'Etcd: HTTP requests received'
@@ -489,7 +489,7 @@ zabbix_export:
expression: 'nodata(/Etcd by HTTP/etcd.is.leader,30m)=1'
name: 'Etcd: Failed to fetch info data (or no data for 30m)'
priority: WARNING
- description: 'Zabbix has not received data for items for the last 30 minutes'
+ description: 'Zabbix has not received data for items for the last 30 minutes.'
manual_close: 'YES'
dependencies:
-
@@ -624,7 +624,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: Bps
- description: 'The number of bytes received from grpc clients per second'
+ description: 'The number of bytes received from grpc clients per second.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -650,7 +650,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: Bps
- description: 'The number of bytes sent from grpc clients per second'
+ description: 'The number of bytes sent from grpc clients per second.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -796,7 +796,7 @@ zabbix_export:
expression: 'min(/Etcd by HTTP/etcd.proposals.pending,5m)>{$ETCD.PROPOSAL.PENDING.MAX.WARN}'
name: 'Etcd: Too many proposals are queued to commit (over {$ETCD.PROPOSAL.PENDING.MAX.WARN} for 5m)'''
priority: WARNING
- description: '"Rising pending proposals suggests there is a high client load or the member cannot commit proposals."'
+ description: 'Rising pending proposals suggests there is a high client load or the member cannot commit proposals.'
-
uuid: bd7398507c274bfab53339380df16761
name: 'Etcd: PUT per second'
@@ -1108,7 +1108,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: rps
- description: 'The number of RPCs completed on the server with grpc_code {#GRPC.CODE}'
+ description: 'The number of RPCs completed on the server with grpc_code {#GRPC.CODE}.'
preprocessing:
-
type: PROMETHEUS_TO_JSON
@@ -1197,7 +1197,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: Bps
- description: 'The number of bytes received from peer with ID {#ETCD.PEER}'
+ description: 'The number of bytes received from peer with ID {#ETCD.PEER}.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -1225,7 +1225,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: Bps
- description: 'The number of bytes sent to peer with ID {#ETCD.PEER}'
+ description: 'The number of bytes sent to peer with ID {#ETCD.PEER}.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -1253,7 +1253,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: rps
- description: 'The number of receive failures from the peer with ID {#ETCD.PEER}'
+ description: 'The number of receive failures from the peer with ID {#ETCD.PEER}.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -1281,7 +1281,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: rps
- description: 'The number of send failures from peer with ID {#ETCD.PEER}'
+ description: 'The number of send failures from peer with ID {#ETCD.PEER}.'
preprocessing:
-
type: PROMETHEUS_PATTERN
@@ -1315,49 +1315,49 @@ zabbix_export:
-
macro: '{$ETCD.GRPC.ERRORS.MAX.WARN}'
value: '1'
- description: 'Maximum number of gRPC requests failures'
+ description: 'Maximum number of gRPC requests failures.'
-
macro: '{$ETCD.GRPC_CODE.MATCHES}'
value: '.*'
- description: 'Filter of discoverable gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md'
+ description: 'Filter of discoverable gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.'
-
macro: '{$ETCD.GRPC_CODE.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discovered gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md'
+ description: 'Filter to exclude discovered gRPC codes https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.'
-
macro: '{$ETCD.GRPC_CODE.TRIGGER.MATCHES}'
value: Aborted|Unavailable
- description: 'Filter of discoverable gRPC codes which will be create triggers'
+ description: 'Filter of discoverable gRPC codes which will create triggers.'
-
macro: '{$ETCD.HTTP.FAIL.MAX.WARN}'
value: '2'
- description: 'Maximum number of HTTP requests failures'
+ description: 'Maximum number of HTTP requests failures.'
-
macro: '{$ETCD.LEADER.CHANGES.MAX.WARN}'
value: '5'
- description: 'Maximum number of leader changes'
+ description: 'Maximum number of leader changes.'
-
macro: '{$ETCD.OPEN.FDS.MAX.WARN}'
value: '90'
- description: 'Maximum percentage of used file descriptors'
+ description: 'Maximum percentage of used file descriptors.'
-
macro: '{$ETCD.PASSWORD}'
-
macro: '{$ETCD.PORT}'
value: '2379'
- description: 'The port of Etcd API endpoint'
+ description: 'The port of Etcd API endpoint.'
-
macro: '{$ETCD.PROPOSAL.FAIL.MAX.WARN}'
value: '2'
- description: 'Maximum number of proposal failures'
+ description: 'Maximum number of proposal failures.'
-
macro: '{$ETCD.PROPOSAL.PENDING.MAX.WARN}'
value: '5'
- description: 'Maximum number of proposals in queue'
+ description: 'Maximum number of proposals in queue.'
-
macro: '{$ETCD.SCHEME}'
value: http
- description: 'Request scheme which may be http or https'
+ description: 'Request scheme which may be http or https.'
-
macro: '{$ETCD.USER}'
valuemaps:
@@ -1397,7 +1397,9 @@ zabbix_export:
expression: 'min(/Etcd by HTTP/etcd.open.fds,5m)/last(/Etcd by HTTP/etcd.max.fds)*100>{$ETCD.OPEN.FDS.MAX.WARN}'
name: 'Etcd: Current number of open files is too high (over {$ETCD.OPEN.FDS.MAX.WARN}% for 5m)'
priority: WARNING
- description: 'Heavy file descriptor usage (i.e., near the process’s file descriptor limit) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.'
+ description: |
+ Heavy file descriptor usage (i.e., near the process's file descriptor limit) indicates a potential file descriptor exhaustion issue.
+ If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.
graphs:
-
uuid: 18baccd03c0f4814a42d32b51334787d
diff --git a/templates/app/exchange/README.md b/templates/app/exchange/README.md
index 2ea0b0201dd..7c6b1b4fa71 100644
--- a/templates/app/exchange/README.md
+++ b/templates/app/exchange/README.md
@@ -10,7 +10,6 @@ Official Template for Microsoft Exchange Server 2016.
This template was tested on:
- Microsoft Exchange Server, version 2016 CU18
-- Zabbix, version 5.4
## Setup
@@ -60,7 +59,7 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Databases discovery |<p>Discovery of Exchange databases.</p> |ZABBIX_PASSIVE |perf_instance.discovery["MSExchange Active Manager"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Databases discovery |<p>Discovery of Exchange databases.</p> |ZABBIX_PASSIVE |perf_instance.discovery["MSExchange Active Manager"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
|Web services discovery |<p>Discovery of Exchange web services.</p> |ZABBIX_PASSIVE |perf_instance_en.discovery["Web Service"] |
|LDAP discovery |<p>Discovery of domain controller.</p> |ZABBIX_PASSIVE |perf_instance_en.discovery["MSExchange ADAccess Domain Controllers"] |
@@ -100,16 +99,16 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Information Store [{#INSTANCE}]: Page faults is too high (>{$MS.EXCHANGE.DB.FAULTS.WARN} for {$MS.EXCHANGE.DB.FAULTS.TIME}) |<p>Too much page faults stalls for database "{#INSTANCE}". This counter should be 0 on production servers.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database({#INF.STORE})\Database Page Fault Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.FAULTS.TIME})}>{$MS.EXCHANGE.DB.FAULTS.WARN}` |AVERAGE | |
-|Information Store [{#INSTANCE}]: Log records stalls is too high (>{$MS.EXCHANGE.LOG.STALLS.WARN} for {$MS.EXCHANGE.LOG.STALLS.TIME}) |<p>Stalled log records too high. The average value should be less than 10 threads waiting.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database({#INF.STORE})\Log Record Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.LOG.STALLS.TIME})}>{$MS.EXCHANGE.LOG.STALLS.WARN}` |AVERAGE | |
-|Information Store [{#INSTANCE}]: RPC Requests latency is too high (>{$MS.EXCHANGE.RPC.WARN}s for {$MS.EXCHANGE.RPC.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.RPC.TIME})}>{$MS.EXCHANGE.RPC.WARN}` |WARNING | |
-|Information Store [{#INSTANCE}]: RPC Requests total count is too high (>{$MS.EXCHANGE.RPC.COUNT.WARN} for {$MS.EXCHANGE.RPC.COUNT.TIME}) |<p>Should be below 70 at all times.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC requests", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.RPC.COUNT.TIME})}>{$MS.EXCHANGE.RPC.COUNT.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}s for {$MS.EXCHANGE.DB.ACTIVE.READ.TIME}) |<p>Should be less than 20ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.ACTIVE.READ.TIME})}>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}s for {$MS.EXCHANGE.DB.PASSIVE.READ.TIME}) |<p>Should be less than 200ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.PASSIVE.READ.TIME})}>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average write time latency is too high for {$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME} |<p>Should be less than 50ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME})}>{$MS.EXCHANGE.DB.ACTIVE.WRITE.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average write time latency is higher than read time latency for {$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME} |<p>Should be less than the read latency for the same instance, as measured by the MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency counter.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})}>{Microsoft Exchange Server 2016 by Zabbix agent:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})}` |WARNING | |
-|Domain Controller [{#INSTANCE}]: LDAP read time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Read Time", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.LDAP.TIME})}>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
-|Domain Controller [{#INSTANCE}]: LDAP search time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Search Time", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.LDAP.TIME})}>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: Page faults is too high (>{$MS.EXCHANGE.DB.FAULTS.WARN} for {$MS.EXCHANGE.DB.FAULTS.TIME}) |<p>Too much page faults stalls for database "{#INSTANCE}". This counter should be 0 on production servers.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database({#INF.STORE})\Database Page Fault Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.FAULTS.TIME})>{$MS.EXCHANGE.DB.FAULTS.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: Log records stalls is too high (>{$MS.EXCHANGE.LOG.STALLS.WARN} for {$MS.EXCHANGE.LOG.STALLS.TIME}) |<p>Stalled log records too high. The average value should be less than 10 threads waiting.</p> |`avg(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database({#INF.STORE})\Log Record Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LOG.STALLS.TIME})>{$MS.EXCHANGE.LOG.STALLS.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: RPC Requests latency is too high (>{$MS.EXCHANGE.RPC.WARN}s for {$MS.EXCHANGE.RPC.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.RPC.TIME})>{$MS.EXCHANGE.RPC.WARN}` |WARNING | |
+|Information Store [{#INSTANCE}]: RPC Requests total count is too high (>{$MS.EXCHANGE.RPC.COUNT.WARN} for {$MS.EXCHANGE.RPC.COUNT.TIME}) |<p>Should be below 70 at all times.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC requests", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.RPC.COUNT.TIME})>{$MS.EXCHANGE.RPC.COUNT.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}s for {$MS.EXCHANGE.DB.ACTIVE.READ.TIME}) |<p>Should be less than 20ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.ACTIVE.READ.TIME})>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}s for {$MS.EXCHANGE.DB.PASSIVE.READ.TIME}) |<p>Should be less than 200ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.READ.TIME})>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average write time latency is too high for {$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME} |<p>Should be less than 50ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME})>{$MS.EXCHANGE.DB.ACTIVE.WRITE.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average write time latency is higher than read time latency for {$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME} |<p>Should be less than the read latency for the same instance, as measured by the MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency counter.</p> |`avg(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})>avg(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})` |WARNING | |
+|Domain Controller [{#INSTANCE}]: LDAP read time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Read Time", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LDAP.TIME})>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
+|Domain Controller [{#INSTANCE}]: LDAP search time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent/perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Search Time", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LDAP.TIME})>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
## Feedback
diff --git a/templates/app/exchange/template_app_exchange.yaml b/templates/app/exchange/template_app_exchange.yaml
index 7d89376751c..494e87c481e 100644
--- a/templates/app/exchange/template_app_exchange.yaml
+++ b/templates/app/exchange/template_app_exchange.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:16Z'
+ date: '2021-12-19T15:19:35Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -18,7 +18,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/415007-discussion-thread-for-official-zabbix-template-microsoft-exchange
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/exchange_active/README.md b/templates/app/exchange_active/README.md
index 3cf9ca99955..7d35eb39989 100644
--- a/templates/app/exchange_active/README.md
+++ b/templates/app/exchange_active/README.md
@@ -10,7 +10,6 @@ Official Template for Microsoft Exchange Server 2016.
This template was tested on:
- Microsoft Exchange Server, version 2016 CU18
-- Zabbix, version 5.4
## Setup
@@ -60,7 +59,7 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Databases discovery |<p>Discovery of Exchange databases.</p> |ZABBIX_ACTIVE |perf_instance.discovery["MSExchange Active Manager"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Databases discovery |<p>Discovery of Exchange databases.</p> |ZABBIX_ACTIVE |perf_instance.discovery["MSExchange Active Manager"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
|Web services discovery |<p>Discovery of Exchange web services.</p> |ZABBIX_ACTIVE |perf_instance_en.discovery["Web Service"] |
|LDAP discovery |<p>Discovery of domain controller.</p> |ZABBIX_ACTIVE |perf_instance_en.discovery["MSExchange ADAccess Domain Controllers"] |
@@ -100,16 +99,16 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Information Store [{#INSTANCE}]: Page faults is too high (>{$MS.EXCHANGE.DB.FAULTS.WARN} for {$MS.EXCHANGE.DB.FAULTS.TIME}) |<p>Too much page faults stalls for database "{#INSTANCE}". This counter should be 0 on production servers.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database({#INF.STORE})\Database Page Fault Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.FAULTS.TIME})}>{$MS.EXCHANGE.DB.FAULTS.WARN}` |AVERAGE | |
-|Information Store [{#INSTANCE}]: Log records stalls is too high (>{$MS.EXCHANGE.LOG.STALLS.WARN} for {$MS.EXCHANGE.LOG.STALLS.TIME}) |<p>Stalled log records too high. The average value should be less than 10 threads waiting.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database({#INF.STORE})\Log Record Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.LOG.STALLS.TIME})}>{$MS.EXCHANGE.LOG.STALLS.WARN}` |AVERAGE | |
-|Information Store [{#INSTANCE}]: RPC Requests latency is too high (>{$MS.EXCHANGE.RPC.WARN}s for {$MS.EXCHANGE.RPC.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.RPC.TIME})}>{$MS.EXCHANGE.RPC.WARN}` |WARNING | |
-|Information Store [{#INSTANCE}]: RPC Requests total count is too high (>{$MS.EXCHANGE.RPC.COUNT.WARN} for {$MS.EXCHANGE.RPC.COUNT.TIME}) |<p>Should be below 70 at all times.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC requests", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.RPC.COUNT.TIME})}>{$MS.EXCHANGE.RPC.COUNT.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}s for {$MS.EXCHANGE.DB.ACTIVE.READ.TIME}) |<p>Should be less than 20ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.ACTIVE.READ.TIME})}>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}s for {$MS.EXCHANGE.DB.PASSIVE.READ.TIME}) |<p>Should be less than 200ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.PASSIVE.READ.TIME})}>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average write time latency is too high for {$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME} |<p>Should be less than 50ms on average.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME})}>{$MS.EXCHANGE.DB.ACTIVE.WRITE.WARN}` |WARNING | |
-|Database Counters [{#INSTANCE}]: Average write time latency is higher than read time latency for {$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME} |<p>Should be less than the read latency for the same instance, as measured by the MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency counter.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})}>{Microsoft Exchange Server 2016 by Zabbix agent active:perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}].avg({$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})}` |WARNING | |
-|Domain Controller [{#INSTANCE}]: LDAP read time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Read Time", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.LDAP.TIME})}>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
-|Domain Controller [{#INSTANCE}]: LDAP search time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`{TEMPLATE_NAME:perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Search Time", {$MS.EXCHANGE.PERF.INTERVAL}].min({$MS.EXCHANGE.LDAP.TIME})}>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: Page faults is too high (>{$MS.EXCHANGE.DB.FAULTS.WARN} for {$MS.EXCHANGE.DB.FAULTS.TIME}) |<p>Too much page faults stalls for database "{#INSTANCE}". This counter should be 0 on production servers.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database({#INF.STORE})\Database Page Fault Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.FAULTS.TIME})>{$MS.EXCHANGE.DB.FAULTS.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: Log records stalls is too high (>{$MS.EXCHANGE.LOG.STALLS.WARN} for {$MS.EXCHANGE.LOG.STALLS.TIME}) |<p>Stalled log records too high. The average value should be less than 10 threads waiting.</p> |`avg(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database({#INF.STORE})\Log Record Stalls/sec", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LOG.STALLS.TIME})>{$MS.EXCHANGE.LOG.STALLS.WARN}` |AVERAGE | |
+|Information Store [{#INSTANCE}]: RPC Requests latency is too high (>{$MS.EXCHANGE.RPC.WARN}s for {$MS.EXCHANGE.RPC.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.RPC.TIME})>{$MS.EXCHANGE.RPC.WARN}` |WARNING | |
+|Information Store [{#INSTANCE}]: RPC Requests total count is too high (>{$MS.EXCHANGE.RPC.COUNT.WARN} for {$MS.EXCHANGE.RPC.COUNT.TIME}) |<p>Should be below 70 at all times.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchangeIS Store({#INSTANCE})\RPC requests", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.RPC.COUNT.TIME})>{$MS.EXCHANGE.RPC.COUNT.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}s for {$MS.EXCHANGE.DB.ACTIVE.READ.TIME}) |<p>Should be less than 20ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.ACTIVE.READ.TIME})>{$MS.EXCHANGE.DB.ACTIVE.READ.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average read time latency is too high (>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}s for {$MS.EXCHANGE.DB.PASSIVE.READ.TIME}) |<p>Should be less than 200ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.READ.TIME})>{$MS.EXCHANGE.DB.PASSIVE.READ.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average write time latency is too high for {$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME} |<p>Should be less than 50ms on average.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Attached) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.ACTIVE.WRITE.TIME})>{$MS.EXCHANGE.DB.ACTIVE.WRITE.WARN}` |WARNING | |
+|Database Counters [{#INSTANCE}]: Average write time latency is higher than read time latency for {$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME} |<p>Should be less than the read latency for the same instance, as measured by the MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency counter.</p> |`avg(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Writes (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})>avg(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange Database ==> Instances({#INF.STORE}/_Total)\I/O Database Reads (Recovery) Average Latency", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.DB.PASSIVE.WRITE.TIME})` |WARNING | |
+|Domain Controller [{#INSTANCE}]: LDAP read time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Read Time", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LDAP.TIME})>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
+|Domain Controller [{#INSTANCE}]: LDAP search time is too high (>{$MS.EXCHANGE.LDAP.WARN}s for {$MS.EXCHANGE.LDAP.TIME}) |<p>Should be less than 50ms at all times, with spikes less than 100ms.</p> |`min(/Microsoft Exchange Server 2016 by Zabbix agent active/perf_counter_en["\MSExchange ADAccess Domain Controllers({#INSTANCE})\LDAP Search Time", {$MS.EXCHANGE.PERF.INTERVAL}],{$MS.EXCHANGE.LDAP.TIME})>{$MS.EXCHANGE.LDAP.WARN}` |AVERAGE | |
## Feedback
diff --git a/templates/app/exchange_active/template_app_exchange_active.yaml b/templates/app/exchange_active/template_app_exchange_active.yaml
index 40c0d528907..732006e531f 100644
--- a/templates/app/exchange_active/template_app_exchange_active.yaml
+++ b/templates/app/exchange_active/template_app_exchange_active.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:24Z'
+ date: '2021-12-19T15:19:36Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -18,7 +18,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/415007-discussion-thread-for-official-zabbix-template-microsoft-exchange
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/generic_java_jmx/README.md b/templates/app/generic_java_jmx/README.md
index 33ee31493f0..55daf39d679 100644
--- a/templates/app/generic_java_jmx/README.md
+++ b/templates/app/generic_java_jmx/README.md
@@ -18,18 +18,18 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------|---------|
-| {$JMX.CPU.LOAD.MAX} | <p>A threshold in percent for CPU utilization trigger.</p> | `85` |
-| {$JMX.CPU.LOAD.TIME} | <p>The time during which the CPU utilization may exceed the threshold.</p> | `5m` |
-| {$JMX.FILE.DESCRIPTORS.MAX} | <p>A threshold in percent for file descriptors count trigger.</p> | `85` |
-| {$JMX.FILE.DESCRIPTORS.TIME} | <p>The time during which the file descriptors count may exceed the threshold.</p> | `3m` |
-| {$JMX.HEAP.MEM.USAGE.MAX} | <p>A threshold in percent for Heap memory utilization trigger.</p> | `85` |
-| {$JMX.HEAP.MEM.USAGE.TIME} | <p>The time during which the Heap memory utilization may exceed the threshold.</p> | `10m` |
-| {$JMX.MP.USAGE.MAX} | <p>A threshold in percent for memory pools utilization trigger. Use a context to change the threshold for a specific pool.</p> | `85` |
-| {$JMX.MP.USAGE.TIME} | <p>The time during which the memory pools utilization may exceed the threshold.</p> | `10m` |
-| {$JMX.NONHEAP.MEM.USAGE.MAX} | <p>A threshold in percent for Non-heap memory utilization trigger.</p> | `85` |
-| {$JMX.NONHEAP.MEM.USAGE.TIME} | <p>The time during which the Non-heap memory utilization may exceed the threshold.</p> | `10m` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$JMX.CPU.LOAD.MAX} |<p>A threshold in percent for CPU utilization trigger.</p> |`85` |
+|{$JMX.CPU.LOAD.TIME} |<p>The time during which the CPU utilization may exceed the threshold.</p> |`5m` |
+|{$JMX.FILE.DESCRIPTORS.MAX} |<p>A threshold in percent for file descriptors count trigger.</p> |`85` |
+|{$JMX.FILE.DESCRIPTORS.TIME} |<p>The time during which the file descriptors count may exceed the threshold.</p> |`3m` |
+|{$JMX.HEAP.MEM.USAGE.MAX} |<p>A threshold in percent for Heap memory utilization trigger.</p> |`85` |
+|{$JMX.HEAP.MEM.USAGE.TIME} |<p>The time during which the Heap memory utilization may exceed the threshold.</p> |`10m` |
+|{$JMX.MP.USAGE.MAX} |<p>A threshold in percent for memory pools utilization trigger. Use a context to change the threshold for a specific pool.</p> |`85` |
+|{$JMX.MP.USAGE.TIME} |<p>The time during which the memory pools utilization may exceed the threshold.</p> |`10m` |
+|{$JMX.NONHEAP.MEM.USAGE.MAX} |<p>A threshold in percent for Non-heap memory utilization trigger.</p> |`85` |
+|{$JMX.NONHEAP.MEM.USAGE.TIME} |<p>The time during which the Non-heap memory utilization may exceed the threshold.</p> |`10m` |
## Template links
@@ -40,85 +40,85 @@ There are no template links in this template.
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|-------|----------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| JMX | ClassLoading: Loaded class count | <p>Displays number of classes that are currently loaded in the Java virtual machine.</p> | JMX | jmx["java.lang:type=ClassLoading","LoadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | ClassLoading: Total loaded class count | <p>Displays the total number of classes that have been loaded since the Java virtual machine has started execution.</p> | JMX | jmx["java.lang:type=ClassLoading","TotalLoadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | ClassLoading: Unloaded class count | <p>Displays the total number of classes that have been loaded since the Java virtual machine has started execution.</p> | JMX | jmx["java.lang:type=ClassLoading","UnloadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Compilation: Name of the current JIT compiler | <p>Displays the total number of classes unloaded since the Java virtual machine has started execution.</p> | JMX | jmx["java.lang:type=Compilation","Name"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-| JMX | Compilation: Accumulated time spent | <p>Displays the approximate accumulated elapsed time spent in compilation, in seconds.</p> | JMX | jmx["java.lang:type=Compilation","TotalCompilationTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: ConcurrentMarkSweep number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: ConcurrentMarkSweep accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: Copy number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=Copy","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: Copy accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=Copy","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: MarkSweepCompact number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: MarkSweepCompact accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: ParNew number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: ParNew accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: PS MarkSweep number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: PS MarkSweep accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | GarbageCollector: PS Scavenge number of collections per second | <p>Displays the total number of collections that have occurred per second.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| JMX | GarbageCollector: PS Scavenge accumulated time spent in collection | <p>Displays the approximate accumulated collection elapsed time, in seconds.</p> | JMX | jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Heap memory committed | <p>Current heap memory allocated. This amount of memory is guaranteed for the Java virtual machine to use.</p> | JMX | jmx["java.lang:type=Memory","HeapMemoryUsage.committed"] |
-| JMX | Memory: Heap memory maximum size | <p>Maximum amount of heap that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=Memory","HeapMemoryUsage.max"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Heap memory used | <p>Current memory usage outside the heap.</p> | JMX | jmx["java.lang:type=Memory","HeapMemoryUsage.used"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Non-Heap memory committed | <p>Current memory allocated outside the heap. This amount of memory is guaranteed for the Java virtual machine to use.</p> | JMX | jmx["java.lang:type=Memory","NonHeapMemoryUsage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Non-Heap memory maximum size | <p>Maximum amount of non-heap memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=Memory","NonHeapMemoryUsage.max"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Non-Heap memory used | <p>Current memory usage outside the heap</p> | JMX | jmx["java.lang:type=Memory","NonHeapMemoryUsage.used"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Memory: Object pending finalization count | <p>The approximate number of objects for which finalization is pending.</p> | JMX | jmx["java.lang:type=Memory","ObjectPendingFinalizationCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: CMS Old Gen committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: CMS Old Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.max"] |
-| JMX | MemoryPool: CMS Old Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.used"] |
-| JMX | MemoryPool: CMS Perm Gen committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: CMS Perm Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.max"] |
-| JMX | MemoryPool: CMS Perm Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.used"] |
-| JMX | MemoryPool: Code Cache committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: CodeCache maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.max"] |
-| JMX | MemoryPool: Code Cache used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.used"] |
-| JMX | MemoryPool: Perm Gen committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: Perm Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.max"] |
-| JMX | MemoryPool: Perm Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.used"] |
-| JMX | MemoryPool: PS Old Gen | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: PS Old Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.max"] |
-| JMX | MemoryPool: PS Old Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.used"] |
-| JMX | MemoryPool: PS Perm Gen committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: PS Perm Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.max"] |
-| JMX | MemoryPool: PS Perm Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.used"] |
-| JMX | MemoryPool: Tenured Gen committed | <p>Current memory allocated</p> | JMX | jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | MemoryPool: Tenured Gen maximum size | <p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> | JMX | jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.max"] |
-| JMX | MemoryPool: Tenured Gen used | <p>Current memory usage</p> | JMX | jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.used"] |
-| JMX | OperatingSystem: File descriptors maximum count | <p>This is the number of file descriptors we can have opened in the same process, as determined by the operating system. You can never have more file descriptors than this number.</p> | JMX | jmx["java.lang:type=OperatingSystem","MaxFileDescriptorCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | OperatingSystem: File descriptors opened | <p>This is the number of opened file descriptors at the moment, if this reaches the MaxFileDescriptorCount, the application will throw an IOException: Too many open files. This could mean you’re are opening file descriptors and never closing them.</p> | JMX | jmx["java.lang:type=OperatingSystem","OpenFileDescriptorCount"] |
-| JMX | OperatingSystem: Process CPU Load | <p>ProcessCpuLoad represents the CPU load in this process.</p> | JMX | jmx["java.lang:type=OperatingSystem","ProcessCpuLoad"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
-| JMX | Runtime: JVM uptime | <p>-</p> | JMX | jmx["java.lang:type=Runtime","Uptime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p> |
-| JMX | Runtime: JVM name | <p>-</p> | JMX | jmx["java.lang:type=Runtime","VmName"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-| JMX | Runtime: JVM version | <p>-</p> | JMX | jmx["java.lang:type=Runtime","VmVersion"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-| JMX | Threading: Daemon thread count | <p>Number of daemon threads running.</p> | JMX | jmx["java.lang:type=Threading","DaemonThreadCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| JMX | Threading: Peak thread count | <p>Maximum number of threads being executed at the same time since the JVM was started or the peak was reset.</p> | JMX | jmx["java.lang:type=Threading","PeakThreadCount"] |
-| JMX | Threading: Thread count | <p>The number of threads running at the current moment.</p> | JMX | jmx["java.lang:type=Threading","ThreadCount"] |
-| JMX | Threading: Total started thread count | <p>The number of threads started since the JVM was launched.</p> | JMX | jmx["java.lang:type=Threading","TotalStartedThreadCount"] |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|JMX |ClassLoading: Loaded class count |<p>Displays number of classes that are currently loaded in the Java virtual machine.</p> |JMX |jmx["java.lang:type=ClassLoading","LoadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |ClassLoading: Total loaded class count |<p>Displays the total number of classes that have been loaded since the Java virtual machine has started execution.</p> |JMX |jmx["java.lang:type=ClassLoading","TotalLoadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |ClassLoading: Unloaded class count |<p>Displays the total number of classes that have been unloaded since the Java virtual machine has started execution.</p> |JMX |jmx["java.lang:type=ClassLoading","UnloadedClassCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Compilation: Name of the current JIT compiler |<p>Displays the name of the Just-In-Time (JIT) compiler currently in use.</p> |JMX |jmx["java.lang:type=Compilation","Name"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|JMX |Compilation: Accumulated time spent |<p>Displays the approximate accumulated elapsed time spent in compilation, in seconds.</p> |JMX |jmx["java.lang:type=Compilation","TotalCompilationTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: ConcurrentMarkSweep number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: ConcurrentMarkSweep accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: Copy number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=Copy","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: Copy accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=Copy","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: MarkSweepCompact number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: MarkSweepCompact accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: ParNew number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: ParNew accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: PS MarkSweep number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: PS MarkSweep accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |GarbageCollector: PS Scavenge number of collections per second |<p>Displays the total number of collections that have occurred per second.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionCount"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|JMX |GarbageCollector: PS Scavenge accumulated time spent in collection |<p>Displays the approximate accumulated collection elapsed time, in seconds.</p> |JMX |jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionTime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Heap memory committed |<p>Current heap memory allocated. This amount of memory is guaranteed for the Java virtual machine to use.</p> |JMX |jmx["java.lang:type=Memory","HeapMemoryUsage.committed"] |
+|JMX |Memory: Heap memory maximum size |<p>Maximum amount of heap that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=Memory","HeapMemoryUsage.max"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Heap memory used |<p>Current heap memory usage.</p> |JMX |jmx["java.lang:type=Memory","HeapMemoryUsage.used"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Non-Heap memory committed |<p>Current memory allocated outside the heap. This amount of memory is guaranteed for the Java virtual machine to use.</p> |JMX |jmx["java.lang:type=Memory","NonHeapMemoryUsage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Non-Heap memory maximum size |<p>Maximum amount of non-heap memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=Memory","NonHeapMemoryUsage.max"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Non-Heap memory used |<p>Current memory usage outside the heap</p> |JMX |jmx["java.lang:type=Memory","NonHeapMemoryUsage.used"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Memory: Object pending finalization count |<p>The approximate number of objects for which finalization is pending.</p> |JMX |jmx["java.lang:type=Memory","ObjectPendingFinalizationCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: CMS Old Gen committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: CMS Old Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.max"] |
+|JMX |MemoryPool: CMS Old Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.used"] |
+|JMX |MemoryPool: CMS Perm Gen committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: CMS Perm Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.max"] |
+|JMX |MemoryPool: CMS Perm Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.used"] |
+|JMX |MemoryPool: Code Cache committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: CodeCache maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.max"] |
+|JMX |MemoryPool: Code Cache used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.used"] |
+|JMX |MemoryPool: Perm Gen committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: Perm Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.max"] |
+|JMX |MemoryPool: Perm Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.used"] |
+|JMX |MemoryPool: PS Old Gen |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: PS Old Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.max"] |
+|JMX |MemoryPool: PS Old Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.used"] |
+|JMX |MemoryPool: PS Perm Gen committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: PS Perm Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.max"] |
+|JMX |MemoryPool: PS Perm Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.used"] |
+|JMX |MemoryPool: Tenured Gen committed |<p>Current memory allocated</p> |JMX |jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.committed"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |MemoryPool: Tenured Gen maximum size |<p>Maximum amount of memory that can be used for memory management. This amount of memory is not guaranteed to be available if it is greater than the amount of committed memory. The Java virtual machine may fail to allocate memory even if the amount of used memory does not exceed this maximum size.</p> |JMX |jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.max"] |
+|JMX |MemoryPool: Tenured Gen used |<p>Current memory usage</p> |JMX |jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.used"] |
+|JMX |OperatingSystem: File descriptors maximum count |<p>This is the number of file descriptors we can have opened in the same process, as determined by the operating system. You can never have more file descriptors than this number.</p> |JMX |jmx["java.lang:type=OperatingSystem","MaxFileDescriptorCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |OperatingSystem: File descriptors opened |<p>This is the number of opened file descriptors at the moment, if this reaches the MaxFileDescriptorCount, the application will throw an IOException: Too many open files. This could mean you are opening file descriptors and never closing them.</p> |JMX |jmx["java.lang:type=OperatingSystem","OpenFileDescriptorCount"] |
+|JMX |OperatingSystem: Process CPU Load |<p>ProcessCpuLoad represents the CPU load in this process.</p> |JMX |jmx["java.lang:type=OperatingSystem","ProcessCpuLoad"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
+|JMX |Runtime: JVM uptime |<p>-</p> |JMX |jmx["java.lang:type=Runtime","Uptime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p> |
+|JMX |Runtime: JVM name |<p>-</p> |JMX |jmx["java.lang:type=Runtime","VmName"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|JMX |Runtime: JVM version |<p>-</p> |JMX |jmx["java.lang:type=Runtime","VmVersion"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|JMX |Threading: Daemon thread count |<p>Number of daemon threads running.</p> |JMX |jmx["java.lang:type=Threading","DaemonThreadCount"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|JMX |Threading: Peak thread count |<p>Maximum number of threads being executed at the same time since the JVM was started or the peak was reset.</p> |JMX |jmx["java.lang:type=Threading","PeakThreadCount"] |
+|JMX |Threading: Thread count |<p>The number of threads running at the current moment.</p> |JMX |jmx["java.lang:type=Threading","ThreadCount"] |
+|JMX |Threading: Total started thread count |<p>The number of threads started since the JVM was launched.</p> |JMX |jmx["java.lang:type=Threading","TotalStartedThreadCount"] |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-----------------------------------------------------------------------------------------------------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|----------------------------------|
-| Compilation: {HOST.NAME} uses suboptimal JIT compiler | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=Compilation","Name"].str(Client)}=1` | INFO | <p>Manual close: YES</p> |
-| GarbageCollector: Concurrent Mark Sweep in fire fighting mode | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionCount"].last()}>{Generic Java JMX:jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionCount"].last()}` | AVERAGE | |
-| GarbageCollector: Mark Sweep Compact in fire fighting mode | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionCount"].last()}>{Generic Java JMX:jmx["java.lang:type=GarbageCollector,name=Copy","CollectionCount"].last()}` | AVERAGE | |
-| GarbageCollector: PS Mark Sweep in fire fighting mode | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionCount"].last()}>{Generic Java JMX:jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionCount"].last()}` | AVERAGE | |
-| Memory: Heap memory usage more than {$JMX.HEAP.USAGE.MAX}% for {$JMX.HEAP.MEM.USAGE.TIME} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=Memory","HeapMemoryUsage.used"].min({$JMX.HEAP.MEM.USAGE.TIME})}>({Generic Java JMX:jmx["java.lang:type=Memory","HeapMemoryUsage.max"].last()}*{$JMX.HEAP.MEM.USAGE.MAX}/100)` | WARNING | |
-| Memory: Non-Heap memory usage more than {$JMX.NONHEAP.MEM.USAGE.MAX}% for {$JMX.NONHEAP.MEM.USAGE.TIME} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=Memory","NonHeapMemoryUsage.used"].min({$JMX.NONHEAP.MEM.USAGE.TIME})}>({Generic Java JMX:jmx["java.lang:type=Memory","NonHeapMemoryUsage.max"].last()}*{$JMX.NONHEAP.MEM.USAGE.MAX}/100)` | WARNING | |
-| MemoryPool: CMS Old Gen memory usage more than {$JMX.MP.USAGE.MAX:"CMS Old Gen"}% for {$JMX.MP.USAGE.TIME:"CMS Old Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"CMS Old Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"CMS Old Gen"}/100)` | WARNING | |
-| MemoryPool: CMS Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"CMS Perm Gen"}% for {$JMX.MP.USAGE.TIME:"CMS Perm Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"CMS Perm Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"CMS Perm Gen"}/100)` | WARNING | |
-| MemoryPool: Code Cache memory usage more than {$JMX.MP.USAGE.MAX:"Code Cache"}% for {$JMX.MP.USAGE.TIME:"Code Cache"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.used"].min({$JMX.MP.USAGE.TIME:"Code Cache"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"Code Cache"}/100)` | WARNING | |
-| MemoryPool: Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"Perm Gen"}% for {$JMX.MP.USAGE.TIME:"Perm Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"Perm Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"Perm Gen"}/100)` | WARNING | |
-| MemoryPool: PS Old Gen memory usage more than {$JMX.MP.USAGE.MAX:"PS Old Gen"}% for {$JMX.MP.USAGE.TIME:"PS Old Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"PS Old Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"PS Old Gen"}/100)` | WARNING | |
-| MemoryPool: PS Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"PS Perm Gen"}% for {$JMX.MP.USAGE.TIME:"PS Perm Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"PS Perm Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"PS Perm Gen"}/100)` | WARNING | |
-| MemoryPool: Tenured Gen memory usage more than {$JMX.MP.USAGE.MAX:"Tenured Gen"}% for {$JMX.MP.USAGE.TIME:"Tenured Gen"} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.used"].min({$JMX.MP.USAGE.TIME:"Tenured Gen"})}>({Generic Java JMX:jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.max"].last()}*{$JMX.MP.USAGE.MAX:"Tenured Gen"}/100)` | WARNING | |
-| OperatingSystem: Opened file descriptor count more than {$JMX.FILE.DESCRIPTORS.MAX}% of maximum | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=OperatingSystem","OpenFileDescriptorCount"].min({$JMX.FILE.DESCRIPTORS.TIME})}>({Generic Java JMX:jmx["java.lang:type=OperatingSystem","MaxFileDescriptorCount"].last()}*{$JMX.FILE.DESCRIPTORS.MAX}/100)` | WARNING | |
-| OperatingSystem: Process CPU Load more than {$JMX.CPU.LOAD.MAX}% for {$JMX.CPU.LOAD.TIME} | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=OperatingSystem","ProcessCpuLoad"].min({$JMX.CPU.LOAD.TIME})}>{$JMX.CPU.LOAD.MAX}` | AVERAGE | |
-| Runtime: JVM is not reachable | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=Runtime","Uptime"].nodata(5m)}=1` | AVERAGE | <p>Manual close: YES</p> |
-| Runtime: {HOST.NAME} runs suboptimal VM type | <p>-</p> | `{TEMPLATE_NAME:jmx["java.lang:type=Runtime","VmName"].str(Server)}<>1` | INFO | <p>Manual close: YES</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Compilation: {HOST.NAME} uses suboptimal JIT compiler |<p>-</p> |`find(/Generic Java JMX/jmx["java.lang:type=Compilation","Name"],,"like","Client")=1` |INFO |<p>Manual close: YES</p> |
+|GarbageCollector: Concurrent Mark Sweep in fire fighting mode |<p>-</p> |`last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=ConcurrentMarkSweep","CollectionCount"])>last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=ParNew","CollectionCount"])` |AVERAGE | |
+|GarbageCollector: Mark Sweep Compact in fire fighting mode |<p>-</p> |`last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=MarkSweepCompact","CollectionCount"])>last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=Copy","CollectionCount"])` |AVERAGE | |
+|GarbageCollector: PS Mark Sweep in fire fighting mode |<p>-</p> |`last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=PS MarkSweep","CollectionCount"])>last(/Generic Java JMX/jmx["java.lang:type=GarbageCollector,name=PS Scavenge","CollectionCount"])` |AVERAGE | |
+|Memory: Heap memory usage more than {$JMX.HEAP.USAGE.MAX}% for {$JMX.HEAP.MEM.USAGE.TIME} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=Memory","HeapMemoryUsage.used"],{$JMX.HEAP.MEM.USAGE.TIME})>(last(/Generic Java JMX/jmx["java.lang:type=Memory","HeapMemoryUsage.max"])*{$JMX.HEAP.MEM.USAGE.MAX}/100)` |WARNING | |
+|Memory: Non-Heap memory usage more than {$JMX.NONHEAP.MEM.USAGE.MAX}% for {$JMX.NONHEAP.MEM.USAGE.TIME} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=Memory","NonHeapMemoryUsage.used"],{$JMX.NONHEAP.MEM.USAGE.TIME})>(last(/Generic Java JMX/jmx["java.lang:type=Memory","NonHeapMemoryUsage.max"])*{$JMX.NONHEAP.MEM.USAGE.MAX}/100)` |WARNING | |
+|MemoryPool: CMS Old Gen memory usage more than {$JMX.MP.USAGE.MAX:"CMS Old Gen"}% for {$JMX.MP.USAGE.TIME:"CMS Old Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"CMS Old Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=CMS Old Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"CMS Old Gen"}/100)` |WARNING | |
+|MemoryPool: CMS Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"CMS Perm Gen"}% for {$JMX.MP.USAGE.TIME:"CMS Perm Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"CMS Perm Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=CMS Perm Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"CMS Perm Gen"}/100)` |WARNING | |
+|MemoryPool: Code Cache memory usage more than {$JMX.MP.USAGE.MAX:"Code Cache"}% for {$JMX.MP.USAGE.TIME:"Code Cache"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.used"],{$JMX.MP.USAGE.TIME:"Code Cache"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Code Cache","Usage.max"])*{$JMX.MP.USAGE.MAX:"Code Cache"}/100)` |WARNING | |
+|MemoryPool: Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"Perm Gen"}% for {$JMX.MP.USAGE.TIME:"Perm Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"Perm Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Perm Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"Perm Gen"}/100)` |WARNING | |
+|MemoryPool: PS Old Gen memory usage more than {$JMX.MP.USAGE.MAX:"PS Old Gen"}% for {$JMX.MP.USAGE.TIME:"PS Old Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"PS Old Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=PS Old Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"PS Old Gen"}/100)` |WARNING | |
+|MemoryPool: PS Perm Gen memory usage more than {$JMX.MP.USAGE.MAX:"PS Perm Gen"}% for {$JMX.MP.USAGE.TIME:"PS Perm Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"PS Perm Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=PS Perm Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"PS Perm Gen"}/100)` |WARNING | |
+|MemoryPool: Tenured Gen memory usage more than {$JMX.MP.USAGE.MAX:"Tenured Gen"}% for {$JMX.MP.USAGE.TIME:"Tenured Gen"} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.used"],{$JMX.MP.USAGE.TIME:"Tenured Gen"})>(last(/Generic Java JMX/jmx["java.lang:type=MemoryPool,name=Tenured Gen","Usage.max"])*{$JMX.MP.USAGE.MAX:"Tenured Gen"}/100)` |WARNING | |
+|OperatingSystem: Opened file descriptor count more than {$JMX.FILE.DESCRIPTORS.MAX}% of maximum |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=OperatingSystem","OpenFileDescriptorCount"],{$JMX.FILE.DESCRIPTORS.TIME})>(last(/Generic Java JMX/jmx["java.lang:type=OperatingSystem","MaxFileDescriptorCount"])*{$JMX.FILE.DESCRIPTORS.MAX}/100)` |WARNING | |
+|OperatingSystem: Process CPU Load more than {$JMX.CPU.LOAD.MAX}% for {$JMX.CPU.LOAD.TIME} |<p>-</p> |`min(/Generic Java JMX/jmx["java.lang:type=OperatingSystem","ProcessCpuLoad"],{$JMX.CPU.LOAD.TIME})>{$JMX.CPU.LOAD.MAX}` |AVERAGE | |
+|Runtime: JVM is not reachable |<p>-</p> |`nodata(/Generic Java JMX/jmx["java.lang:type=Runtime","Uptime"],5m)=1` |AVERAGE |<p>Manual close: YES</p> |
+|Runtime: {HOST.NAME} runs suboptimal VM type |<p>-</p> |`find(/Generic Java JMX/jmx["java.lang:type=Runtime","VmName"],,"like","Server")<>1` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/generic_java_jmx/template_app_generic_java_jmx.yaml b/templates/app/generic_java_jmx/template_app_generic_java_jmx.yaml
index a9f1b97f695..5fb8c0fcd96 100644
--- a/templates/app/generic_java_jmx/template_app_generic_java_jmx.yaml
+++ b/templates/app/generic_java_jmx/template_app_generic_java_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:17Z'
+ date: '2021-12-19T15:19:36Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -10,7 +10,7 @@ zabbix_export:
uuid: 72aab08f7f27406a8f2c291648e5ba95
template: 'Generic Java JMX'
name: 'Generic Java JMX'
- description: 'Template tooling version used: 0.38'
+ description: 'Template tooling version used: 0.40'
groups:
-
name: Templates/Applications
@@ -812,7 +812,7 @@ zabbix_export:
type: JMX
key: 'jmx["java.lang:type=OperatingSystem","OpenFileDescriptorCount"]'
history: 7d
- description: 'This is the number of opened file descriptors at the moment, if this reaches the MaxFileDescriptorCount, the application will throw an IOException: Too many open files. This could mean you’re are opening file descriptors and never closing them.'
+ description: 'This is the number of opened file descriptors at the moment, if this reaches the MaxFileDescriptorCount, the application will throw an IOException: Too many open files. This could mean you''re opening file descriptors and never closing them.'
jmx_endpoint: 'service:jmx:rmi:///jndi/rmi://{HOST.CONN}:{HOST.PORT}/jmxrmi'
tags:
-
diff --git a/templates/app/gitlab_http/README.md b/templates/app/gitlab_http/README.md
index ad84d13f81a..ad8b587f461 100644
--- a/templates/app/gitlab_http/README.md
+++ b/templates/app/gitlab_http/README.md
@@ -7,7 +7,7 @@ For Zabbix version: 5.4 and higher
The template to monitor GitLab by Zabbix that works without any external scripts.
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-Template `GitLab by HTTP` — collects metrics by HTTP agent from GitLab /metrics endpoint.
+Template `GitLab by HTTP` — collects metrics by HTTP agent from GitLab /metrics endpoint.
See https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html.
@@ -22,9 +22,10 @@ This template was tested on:
This template works with self-hosted GitLab instances. Internal service metrics are collected from GitLab /-/metrics endpoint.
To access the metrics, the client IP address must be [explicitly allowed](https://docs.gitlab.com/ee/administration/monitoring/ip_whitelist.html).
-Don't forget to change the macros {$GITLAB.URL}.
-Also, see the Macros section for a list of macros used to set trigger values.
-*NOTE.* Some metrics may not be collected depending on your Gitlab instance version and configuration. See [Gitlab’s documentation](https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html) for further information about its metric collection.
+Don't forget to change the macros {$GITLAB.URL}.
+Also, see the Macros section for a list of macros used to set trigger values.
+
+*NOTE.* Some metrics may not be collected depending on your GitLab instance version and configuration. See [GitLab's documentation](https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html) for further information about its metric collection.
## Zabbix configuration
@@ -59,15 +60,15 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|GitLab |GitLab: Instance readiness check |<p>The readiness probe checks whether the GitLab instance is ready to accept traffic via Rails Controllers.</p> |HTTP_AGENT |gitlab.readiness<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED<p>- JSONPATH: `$.master_check[0].status`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-|GitLab |GitLab: Application server status |<p>Checks whether the application server is running. This probe is used to know if Rails Controllers are not deadlocked due to a multi-threading.</p> |HTTP_AGENT |gitlab.liveness<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED<p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|GitLab |GitLab: Instance readiness check |<p>The readiness probe checks whether the GitLab instance is ready to accept traffic via Rails Controllers.</p> |HTTP_AGENT |gitlab.readiness<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {"master_check":[{"status":"failed"}]}`</p><p>- JSONPATH: `$.master_check[0].status`</p><p>- BOOL_TO_DECIMAL</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|GitLab |GitLab: Application server status |<p>Checks whether the application server is running. This probe is used to know if Rails Controllers are not deadlocked due to a multi-threading.</p> |HTTP_AGENT |gitlab.liveness<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {"status": "failed"}`</p><p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|GitLab |GitLab: Version |<p>Version of the GitLab instance.</p> |DEPENDENT |gitlab.deployments.version<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="deployments")].labels.version.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|GitLab |GitLab: Ruby: First process start time |<p>Minimum UNIX timestamp of ruby processes start time.</p> |DEPENDENT |gitlab.ruby.process_start_time_seconds.first<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_start_time_seconds")].value.min()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|GitLab |GitLab: Ruby: Last process start time |<p>Maximum UNIX timestamp ruby processes start time.</p> |DEPENDENT |gitlab.ruby.process_start_time_seconds.last<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_start_time_seconds")].value.max()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|GitLab |GitLab: User logins, total |<p>Counter of how many users have logged in since GitLab was started or restarted.</p> |DEPENDENT |gitlab.user_session_logins_total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="user_session_logins_total")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|GitLab |GitLab: User CAPTCHA logins failed, total |<p>Counter of failed CAPTCHA attempts during login.</p> |DEPENDENT |gitlab.failed_login_captcha_total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="failed_login_captcha_total")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|GitLab |GitLab: User CAPTCHA logins, total |<p>Counter of successful CAPTCHA attempts during login.</p> |DEPENDENT |gitlab.successful_login_captcha_total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="successful_login_captcha_total")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|GitLab |GitLab: Upload file does not exist |<p>Number of times an upload record could not find its file.</p> |DEPENDENT |gitlab.upload_file_does_not_exist<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="upload_file_does_not_exist ")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|GitLab |GitLab: Upload file does not exist |<p>Number of times an upload record could not find its file.</p> |DEPENDENT |gitlab.upload_file_does_not_exist<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="upload_file_does_not_exist")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|GitLab |GitLab: Pipelines: Processing events, total |<p>Total amount of pipeline processing events.</p> |DEPENDENT |gitlab.pipeline.processing_events_total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_ci_pipeline_processing_events_total")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|GitLab |GitLab: Pipelines: Created, total |<p>Counter of pipelines created.</p> |DEPENDENT |gitlab.pipeline.created_total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="pipelines_created_total")].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|GitLab |GitLab: Pipelines: Auto DevOps pipelines, total |<p>Counter of completed Auto DevOps pipelines.</p> |DEPENDENT |gitlab.pipeline.auto_devops_completed.total<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="auto_devops_pipelines_completed_total")].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
@@ -80,27 +81,27 @@ There are no template links in this template.
|GitLab |GitLab: Database: Connection pool, idle |<p>Connections to the main database not in use.</p> |DEPENDENT |gitlab.database.connection_pool_idle<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_database_connection_pool_idle" && @.labels.class == "ActiveRecord::Base")].value.sum()`</p> |
|GitLab |GitLab: Database: Connection pool, size |<p>Total connection to the main database pool capacity.</p> |DEPENDENT |gitlab.database.connection_pool_size<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_database_connection_pool_size" && @.labels.class == "ActiveRecord::Base")].value.sum()`</p> |
|GitLab |GitLab: Database: Connection pool, waiting |<p>Threads currently waiting on this queue.</p> |DEPENDENT |gitlab.database.connection_pool_waiting<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_database_connection_pool_waiting" && @.labels.class == "ActiveRecord::Base")].value.sum()`</p> |
-|GitLab |GitLab: Redis: Client requests rate, queues |<p>Number of Redis client requests per second. (Instance: queues)</p> |DEPENDENT |gitlab.redis.client_requests.queues.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "queues")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Redis: Client requests rate, cache |<p>Number of Redis client requests per second. (Instance: cache)</p> |DEPENDENT |gitlab.redis.client_requests.cache.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "cache")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Redis: Client requests rate, shared_state |<p>Number of Redis client requests per second. (Instance: shared_state)</p> |DEPENDENT |gitlab.redis.client_requests.shared_state.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "shared_state")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Redis: Client exceptions rate, queues |<p>Number of Redis client exceptions per second. (Instance: queues)</p> |DEPENDENT |gitlab.redis.client_exceptions.queues.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "queues")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Redis: Client exceptions rate, cache |<p>Number of Redis client exceptions per second. (Instance: cache)</p> |DEPENDENT |gitlab.redis.client_exceptions.cache.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "cache")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Redis: client exceptions rate, shared_state |<p>Number of Redis client exceptions per second. (Instance: shared_state)</p> |DEPENDENT |gitlab.redis.client_exceptions.shared_state.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "shared_state")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Cache: Misses rate, total |<p>The cache read miss count.</p> |DEPENDENT |gitlab.cache.misses_total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_cache_misses_total")].value.sum()`</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Cache: Operations rate, total |<p>The count of cache operations.</p> |DEPENDENT |gitlab.cache.operations_total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_cache_operations_total")].value.sum()`</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Ruby: CPU usage per second |<p>Average CPU time util in seconds.</p> |DEPENDENT |gitlab.ruby.process_cpu_seconds.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_cpu_seconds_total")].value.avg()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
+|GitLab |GitLab: Redis: Client requests rate, queues |<p>Number of Redis client requests per second. (Instance: queues)</p> |DEPENDENT |gitlab.redis.client_requests.queues.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "queues")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Redis: Client requests rate, cache |<p>Number of Redis client requests per second. (Instance: cache)</p> |DEPENDENT |gitlab.redis.client_requests.cache.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "cache")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Redis: Client requests rate, shared_state |<p>Number of Redis client requests per second. (Instance: shared_state)</p> |DEPENDENT |gitlab.redis.client_requests.shared_state.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_requests_total" && @.labels.storage == "shared_state")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Redis: Client exceptions rate, queues |<p>Number of Redis client exceptions per second. (Instance: queues)</p> |DEPENDENT |gitlab.redis.client_exceptions.queues.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "queues")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Redis: Client exceptions rate, cache |<p>Number of Redis client exceptions per second. (Instance: cache)</p> |DEPENDENT |gitlab.redis.client_exceptions.cache.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "cache")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Redis: client exceptions rate, shared_state |<p>Number of Redis client exceptions per second. (Instance: shared_state)</p> |DEPENDENT |gitlab.redis.client_exceptions.shared_state.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_redis_client_exceptions_total" && @.labels.storage == "shared_state")].value.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Cache: Misses rate, total |<p>The cache read miss count.</p> |DEPENDENT |gitlab.cache.misses_total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_cache_misses_total")].value.sum()`</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Cache: Operations rate, total |<p>The count of cache operations.</p> |DEPENDENT |gitlab.cache.operations_total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_cache_operations_total")].value.sum()`</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Ruby: CPU usage per second |<p>Average CPU time util in seconds.</p> |DEPENDENT |gitlab.ruby.process_cpu_seconds.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_cpu_seconds_total")].value.avg()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
|GitLab |GitLab: Ruby: Running_threads |<p>Number of running Ruby threads.</p> |DEPENDENT |gitlab.ruby.threads_running<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="gitlab_ruby_threads_running_threads")].value.sum()`</p> |
|GitLab |GitLab: Ruby: File descriptors opened, avg |<p>Average number of opened file descriptors.</p> |DEPENDENT |gitlab.ruby.file_descriptors.avg<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_file_descriptors")].value.avg()`</p> |
|GitLab |GitLab: Ruby: File descriptors opened, max |<p>Maximum number of opened file descriptors.</p> |DEPENDENT |gitlab.ruby.file_descriptors.max<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_file_descriptors")].value.max()`</p> |
|GitLab |GitLab: Ruby: File descriptors opened, min |<p>Minimum number of opened file descriptors.</p> |DEPENDENT |gitlab.ruby.file_descriptors.min<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_file_descriptors")].value.min()`</p> |
|GitLab |GitLab: Ruby: File descriptors, max |<p>Maximum number of open file descriptors per process.</p> |DEPENDENT |gitlab.ruby.process_max_fds<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_max_fds")].value.avg()`</p> |
-|GitLab |GitLab: Ruby: RSS memory, avg |<p>Average RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.avg<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.avg()`</p> |
-|GitLab |GitLab: Ruby: RSS memory, min |<p>Minimum RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.min<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.min()`</p> |
-|GitLab |GitLab: Ruby: RSS memory, max |<p>Maxinun RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.max<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.max()`</p> |
-|GitLab |GitLab: HTTP requests rate, total |<p>Number of requests received into the system.</p> |DEPENDENT |gitlab.http.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total")].value.sum()`</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: HTTP requests rate, 5xx |<p>Number of handle failures of requests with HTTP-code 5xx.</p> |DEPENDENT |gitlab.http.requests.5xx.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total" && @.labels.status =~ '5..' )].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: HTTP requests rate, 4xx |<p>Number of handle failures of requests with code 4XX.</p> |DEPENDENT |gitlab.http.requests.4xx.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total" && @.labels.status =~ '4..' )].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|GitLab |GitLab: Transactions per second |<p>Transactions per second (gitlab_transaction_* metrics).</p> |DEPENDENT |gitlab.transactions.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=~"gitlab_transaction_.*_count_total")].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
+|GitLab |GitLab: Ruby: RSS memory, avg |<p>Average RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.avg<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.avg()`</p> |
+|GitLab |GitLab: Ruby: RSS memory, min |<p>Minimum RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.min<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.min()`</p> |
+|GitLab |GitLab: Ruby: RSS memory, max |<p>Maximum RSS Memory usage in bytes.</p> |DEPENDENT |gitlab.ruby.process_resident_memory_bytes.max<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="ruby_process_resident_memory_bytes")].value.max()`</p> |
+|GitLab |GitLab: HTTP requests rate, total |<p>Number of requests received into the system.</p> |DEPENDENT |gitlab.http.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total")].value.sum()`</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: HTTP requests rate, 5xx |<p>Number of handle failures of requests with HTTP-code 5xx.</p> |DEPENDENT |gitlab.http.requests.5xx.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total" && @.labels.status =~ '5..' )].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: HTTP requests rate, 4xx |<p>Number of handle failures of requests with code 4XX.</p> |DEPENDENT |gitlab.http.requests.4xx.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=="http_requests_total" && @.labels.status =~ '4..' )].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|GitLab |GitLab: Transactions per second |<p>Transactions per second (gitlab_transaction_* metrics).</p> |DEPENDENT |gitlab.transactions.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=~"gitlab_transaction_.*_count_total")].value.sum()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
|GitLab: Puma stats |GitLab: Active connections |<p>Number of puma threads processing a request.</p> |DEPENDENT |gitlab.puma.active_connections[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='puma_active_connections')].value.sum()`</p> |
|GitLab: Puma stats |GitLab: Workers |<p>Total number of puma workers.</p> |DEPENDENT |gitlab.puma.workers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='puma_workers')].value.sum()`</p> |
|GitLab: Puma stats |GitLab: Running workers |<p>The number of booted puma workers.</p> |DEPENDENT |gitlab.puma.running_workers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='puma_running_workers')].value.sum()`</p> |
@@ -114,25 +115,25 @@ There are no template links in this template.
|GitLab: Unicorn stats |GitLab: Unicorn: Workers |<p>The number of Unicorn workers</p> |DEPENDENT |gitlab.unicorn.unicorn_workers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='unicorn_workers')].value.sum()`</p> |
|GitLab: Unicorn stats |GitLab: Unicorn: Active connections |<p>The number of active Unicorn connections.</p> |DEPENDENT |gitlab.unicorn.active_connections[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='unicorn_active_connections')].value.sum()`</p> |
|GitLab: Unicorn stats |GitLab: Unicorn: Queued connections |<p>The number of queued Unicorn connections.</p> |DEPENDENT |gitlab.unicorn.queued_connections[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name=='unicorn_queued_connections')].value.sum()`</p> |
-|Zabbix_raw_items |GitLab: Get instance metrics |<p>-</p> |HTTP_AGENT |gitlab.get_metrics<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED<p>- PROMETHEUS_TO_JSON |
+|Zabbix_raw_items |GitLab: Get instance metrics |<p>-</p> |HTTP_AGENT |gitlab.get_metrics<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- PROMETHEUS_TO_JSON</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|GitLab: Gitlab instance is not able to accept traffic |<p>-</p> |`{TEMPLATE_NAME:gitlab.readiness.last()}=0` |HIGH |<p>**Depends on**:</p><p>- GitLab: Liveness check was failed</p> |
-|GitLab: Liveness check was failed |<p>The application server is not running or Rails Controllers are deadlocked.</p> |`{TEMPLATE_NAME:gitlab.liveness.last()}=0` |HIGH | |
-|GitLab: Version has changed (new version: {ITEM.VALUE}) |<p>GitLab version has changed. Ack to close.</p> |`{TEMPLATE_NAME:gitlab.deployments.version.diff()}=1 and {TEMPLATE_NAME:gitlab.deployments.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|GitLab: Too many Redis queues client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during to requests to Redis instance queues."</p> |`{TEMPLATE_NAME:gitlab.redis.client_exceptions.queues.rate.min(5m)}>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
-|GitLab: Too many Redis cache client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during to requests to Redis instance cache."</p> |`{TEMPLATE_NAME:gitlab.redis.client_exceptions.cache.rate.min(5m)}>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
-|GitLab: Too many Redis shared_state client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during to requests to Redis instance shared_state."</p> |`{TEMPLATE_NAME:gitlab.redis.client_exceptions.shared_state.rate.min(5m)}>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
-|GitLab: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for metrics for the last 30 minutes</p> |`{TEMPLATE_NAME:gitlab.ruby.threads_running.nodata(30m)}=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- GitLab: Liveness check was failed</p> |
-|GitLab: Current number of open files is too high (over {$GITLAB.OPEN.FDS.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:gitlab.ruby.file_descriptors.max.min(5m)}/{GitLab by HTTP:gitlab.ruby.process_max_fds.last()}*100>{$GITLAB.OPEN.FDS.MAX.WARN}` |WARNING | |
-|GitLab: Too many HTTP requests failures (over {$GITLAB.HTTP.FAIL.MAX.WARN} for 5m)' |<p>"Too many requests failed on GitLab instance with 5xx HTTP code"</p> |`{TEMPLATE_NAME:gitlab.http.requests.5xx.rate.min(5m)}>{$GITLAB.HTTP.FAIL.MAX.WARN}` |WARNING | |
-|GitLab: Puma instance thread utilization is too high (over {$GITLAB.PUMA.UTILIZATION.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:gitlab.puma.active_connections[{#SINGLETON}].min(5m)}/{GitLab by HTTP:gitlab.puma.max_threads[{#SINGLETON}].last()}*100>{$GITLAB.PUMA.UTILIZATION.MAX.WARN}` |WARNING | |
-|GitLab: Puma is queueing requests (over {$GITLAB.PUMA.QUEUE.MAX.WARN}% for 15m) |<p>-</p> |`{TEMPLATE_NAME:gitlab.puma.queued_connections[{#SINGLETON}].min(15m)}>{$GITLAB.PUMA.QUEUE.MAX.WARN}` |WARNING | |
-|GitLab: Unicorn worker utilization is too high (over {$GITLAB.UNICORN.UTILIZATION.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:gitlab.unicorn.active_connections[{#SINGLETON}].min(5m)}/{GitLab by HTTP:gitlab.unicorn.unicorn_workers[{#SINGLETON}].last()}*100>{$GITLAB.UNICORN.UTILIZATION.MAX.WARN}` |WARNING | |
-|GitLab: Unicorn is queueing requests (over {$GITLAB.UNICORN.QUEUE.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:gitlab.unicorn.queued_connections[{#SINGLETON}].min(5m)}>{$GITLAB.UNICORN.QUEUE.MAX.WARN}` |WARNING | |
+|GitLab: Gitlab instance is not able to accept traffic |<p>-</p> |`last(/GitLab by HTTP/gitlab.readiness)=0` |HIGH |<p>**Depends on**:</p><p>- GitLab: Liveness check was failed</p> |
+|GitLab: Liveness check was failed |<p>The application server is not running or Rails Controllers are deadlocked.</p> |`last(/GitLab by HTTP/gitlab.liveness)=0` |HIGH | |
+|GitLab: Version has changed (new version: {ITEM.VALUE}) |<p>GitLab version has changed. Ack to close.</p> |`last(/GitLab by HTTP/gitlab.deployments.version,#1)<>last(/GitLab by HTTP/gitlab.deployments.version,#2) and length(last(/GitLab by HTTP/gitlab.deployments.version))>0` |INFO |<p>Manual close: YES</p> |
+|GitLab: Too many Redis queues client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during requests to Redis instance queues."</p> |`min(/GitLab by HTTP/gitlab.redis.client_exceptions.queues.rate,5m)>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
+|GitLab: Too many Redis cache client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during requests to Redis instance cache."</p> |`min(/GitLab by HTTP/gitlab.redis.client_exceptions.cache.rate,5m)>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
+|GitLab: Too many Redis shared_state client exceptions (over {$GITLAB.REDIS.FAIL.MAX.WARN} for 5m) |<p>"Too many Redis client exceptions during requests to Redis instance shared_state."</p> |`min(/GitLab by HTTP/gitlab.redis.client_exceptions.shared_state.rate,5m)>{$GITLAB.REDIS.FAIL.MAX.WARN}` |WARNING | |
+|GitLab: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for metrics for the last 30 minutes</p> |`nodata(/GitLab by HTTP/gitlab.ruby.threads_running,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- GitLab: Liveness check was failed</p> |
+|GitLab: Current number of open files is too high (over {$GITLAB.OPEN.FDS.MAX.WARN}% for 5m) |<p>-</p> |`min(/GitLab by HTTP/gitlab.ruby.file_descriptors.max,5m)/last(/GitLab by HTTP/gitlab.ruby.process_max_fds)*100>{$GITLAB.OPEN.FDS.MAX.WARN}` |WARNING | |
+|GitLab: Too many HTTP requests failures (over {$GITLAB.HTTP.FAIL.MAX.WARN} for 5m) |<p>"Too many requests failed on GitLab instance with 5xx HTTP code"</p> |`min(/GitLab by HTTP/gitlab.http.requests.5xx.rate,5m)>{$GITLAB.HTTP.FAIL.MAX.WARN}` |WARNING | |
+|GitLab: Puma instance thread utilization is too high (over {$GITLAB.PUMA.UTILIZATION.MAX.WARN}% for 5m) |<p>-</p> |`min(/GitLab by HTTP/gitlab.puma.active_connections[{#SINGLETON}],5m)/last(/GitLab by HTTP/gitlab.puma.max_threads[{#SINGLETON}])*100>{$GITLAB.PUMA.UTILIZATION.MAX.WARN}` |WARNING | |
+|GitLab: Puma is queueing requests (over {$GITLAB.PUMA.QUEUE.MAX.WARN}% for 15m) |<p>-</p> |`min(/GitLab by HTTP/gitlab.puma.queued_connections[{#SINGLETON}],15m)>{$GITLAB.PUMA.QUEUE.MAX.WARN}` |WARNING | |
+|GitLab: Unicorn worker utilization is too high (over {$GITLAB.UNICORN.UTILIZATION.MAX.WARN}% for 5m) |<p>-</p> |`min(/GitLab by HTTP/gitlab.unicorn.active_connections[{#SINGLETON}],5m)/last(/GitLab by HTTP/gitlab.unicorn.unicorn_workers[{#SINGLETON}])*100>{$GITLAB.UNICORN.UTILIZATION.MAX.WARN}` |WARNING | |
+|GitLab: Unicorn is queueing requests (over {$GITLAB.UNICORN.QUEUE.MAX.WARN}% for 5m) |<p>-</p> |`min(/GitLab by HTTP/gitlab.unicorn.queued_connections[{#SINGLETON}],5m)>{$GITLAB.UNICORN.QUEUE.MAX.WARN}` |WARNING | |
## Feedback
diff --git a/templates/app/gitlab_http/template_app_gitlab_http.yaml b/templates/app/gitlab_http/template_app_gitlab_http.yaml
index eefa1e9e3ba..d12dcfb65dc 100644
--- a/templates/app/gitlab_http/template_app_gitlab_http.yaml
+++ b/templates/app/gitlab_http/template_app_gitlab_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-15T14:01:36Z'
+ date: '2021-12-19T15:19:37Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -16,11 +16,11 @@ zabbix_export:
To access the metrics, the client IP address must be explicitly allowed. See https://docs.gitlab.com/ee/administration/monitoring/ip_whitelist.html.
Don't forget change macros {$GITLAB.URL}.
- Some metrics may not be collected depending on your Gitlab instance version and configuration. See (Gitlab’s documentation[)https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html] for further information about its metric collection.
+        Some metrics may not be collected depending on your Gitlab instance version and configuration. See Gitlab's documentation (https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html) for further information about its metric collection.
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -377,7 +377,6 @@ zabbix_export:
parameters:
- 30m
url: '{$GITLAB.URL}/-/liveness'
- status_codes: ''
tags:
-
tag: Application
@@ -846,7 +845,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: B
- description: "Average RSS\tMemory usage in bytes."
+ description: 'Average RSS Memory usage in bytes.'
preprocessing:
-
type: JSONPATH
@@ -867,7 +866,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: B
- description: "Maxinun RSS\tMemory usage in bytes."
+ description: 'Maximum RSS Memory usage in bytes.'
preprocessing:
-
type: JSONPATH
@@ -888,7 +887,7 @@ zabbix_export:
history: 7d
value_type: FLOAT
units: B
- description: "Minimum RSS\tMemory usage in bytes."
+ description: 'Minimum RSS Memory usage in bytes.'
preprocessing:
-
type: JSONPATH
@@ -1037,7 +1036,7 @@ zabbix_export:
-
type: JSONPATH
parameters:
- - "$[?(@.name==\"upload_file_does_not_exist\t\")].value.first()"
+ - '$[?(@.name=="upload_file_does_not_exist")].value.first()'
error_handler: DISCARD_VALUE
master_item:
key: gitlab.get_metrics
diff --git a/templates/app/hadoop_http/README.md b/templates/app/hadoop_http/README.md
index 6274dcaf1e6..a879feebc33 100644
--- a/templates/app/hadoop_http/README.md
+++ b/templates/app/hadoop_http/README.md
@@ -4,15 +4,14 @@
## Overview
For Zabbix version: 5.4 and higher
-The template for monitoring Hadoop over HTTP that works without any external scripts.
-It collects metrics by polling the Hadoop API remotely using an HTTP agent and JSONPath preprocessing.
-Zabbix server (or proxy) execute direct requests to ResourceManager, NodeManagers, NameNode, DataNodes APIs.
+The template for monitoring Hadoop over HTTP that works without any external scripts.
+It collects metrics by polling the Hadoop API remotely using an HTTP agent and JSONPath preprocessing.
+Zabbix server (or proxy) execute direct requests to ResourceManager, NodeManagers, NameNode, DataNodes APIs.
All metrics are collected at once, thanks to the Zabbix bulk data collection.
This template was tested on:
-- Zabbix, version 5.0 and later
- Hadoop, version 3.1 and later
## Setup
@@ -27,15 +26,15 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|--------------------------------------------------|-----------------------------------------------------------------------------------------------------|-------------------|
-| {$HADOOP.CAPACITY_REMAINING.MIN.WARN} | <p>The Hadoop cluster capacity remaining percent for trigger expression.</p> | `20` |
-| {$HADOOP.NAMENODE.HOST} | <p>The Hadoop NameNode host IP address or FQDN.</p> | `NameNode` |
-| {$HADOOP.NAMENODE.PORT} | <p>The Hadoop NameNode Web-UI port.</p> | `9870` |
-| {$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN} | <p>The Hadoop NameNode API page maximum response time in seconds for trigger expression.</p> | `10s` |
-| {$HADOOP.RESOURCEMANAGER.HOST} | <p>The Hadoop ResourceManager host IP address or FQDN.</p> | `ResourceManager` |
-| {$HADOOP.RESOURCEMANAGER.PORT} | <p>The Hadoop ResourceManager Web-UI port.</p> | `8088` |
-| {$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN} | <p>The Hadoop ResourceManager API page maximum response time in seconds for trigger expression.</p> | `10s` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$HADOOP.CAPACITY_REMAINING.MIN.WARN} |<p>The Hadoop cluster capacity remaining percent for trigger expression.</p> |`20` |
+|{$HADOOP.NAMENODE.HOST} |<p>The Hadoop NameNode host IP address or FQDN.</p> |`NameNode` |
+|{$HADOOP.NAMENODE.PORT} |<p>The Hadoop NameNode Web-UI port.</p> |`9870` |
+|{$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN} |<p>The Hadoop NameNode API page maximum response time in seconds for trigger expression.</p> |`10s` |
+|{$HADOOP.RESOURCEMANAGER.HOST} |<p>The Hadoop ResourceManager host IP address or FQDN.</p> |`ResourceManager` |
+|{$HADOOP.RESOURCEMANAGER.PORT} |<p>The Hadoop ResourceManager Web-UI port.</p> |`8088` |
+|{$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN} |<p>The Hadoop ResourceManager API page maximum response time in seconds for trigger expression.</p> |`10s` |
## Template links
@@ -43,97 +42,97 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|------------------------|-------------|------------|------------------------------------------------------------------------------------------------------------------------|
-| Node manager discovery | <p>-</p> | HTTP_AGENT | hadoop.nodemanager.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Data node discovery | <p>-</p> | HTTP_AGENT | hadoop.datanode.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Node manager discovery |<p>-</p> |HTTP_AGENT |hadoop.nodemanager.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Data node discovery |<p>-</p> |HTTP_AGENT |hadoop.datanode.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Hadoop | ResourceManager: Service status | <p>Hadoop ResourceManager API port availability.</p> | SIMPLE | net.tcp.service["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| Hadoop | ResourceManager: Service response time | <p>Hadoop ResourceManager API performance.</p> | SIMPLE | net.tcp.service.perf["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"] |
-| Hadoop | ResourceManager: Uptime | | DEPENDENT | hadoop.resourcemanager.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-| Hadoop | ResourceManager: RPC queue & processing time | <p>Average time spent on processing RPC requests.</p> | DEPENDENT | hadoop.resourcemanager.rpc_processing_time_avg<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=RpcActivityForPort8031')].RpcProcessingTimeAvgTime.first()`</p> |
-| Hadoop | ResourceManager: Active NMs | <p>Number of Active NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_active_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumActiveNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | ResourceManager: Decommissioning NMs | <p>Number of Decommissioning NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_decommissioning_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumDecommissioningNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | ResourceManager: Decommissioned NMs | <p>Number of Decommissioned NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_decommissioned_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumDecommissionedNMs.first()`</p> |
-| Hadoop | ResourceManager: Lost NMs | <p>Number of Lost NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_lost_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumLostNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | ResourceManager: Unhealthy NMs | <p>Number of Unhealthy NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_unhealthy_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumUnhealthyNMs.first()`</p> |
-| Hadoop | ResourceManager: Rebooted NMs | <p>Number of Rebooted NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_rebooted_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumRebootedNMs.first()`</p> |
-| Hadoop | ResourceManager: Shutdown NMs | <p>Number of Shutdown NodeManagers.</p> | DEPENDENT | hadoop.resourcemanager.num_shutdown_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumShutdownNMs.first()`</p> |
-| Hadoop | NameNode: Service status | <p>Hadoop NameNode API port availability.</p> | SIMPLE | net.tcp.service["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| Hadoop | NameNode: Service response time | <p>Hadoop NameNode API performance.</p> | SIMPLE | net.tcp.service.perf["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"] |
-| Hadoop | NameNode: Uptime | | DEPENDENT | hadoop.namenode.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-| Hadoop | NameNode: RPC queue & processing time | <p>Average time spent on processing RPC requests.</p> | DEPENDENT | hadoop.namenode.rpc_processing_time_avg<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=RpcActivityForPort9000')].RpcProcessingTimeAvgTime.first()`</p> |
-| Hadoop | NameNode: Block Pool Renaming | | DEPENDENT | hadoop.namenode.percent_block_pool_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=NameNodeInfo')].PercentBlockPoolUsed.first()`</p> |
-| Hadoop | NameNode: Transactions since last checkpoint | <p>Total number of transactions since last checkpoint.</p> | DEPENDENT | hadoop.namenode.transactions_since_last_checkpoint<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].TransactionsSinceLastCheckpoint.first()`</p> |
-| Hadoop | NameNode: Percent capacity remaining | <p>Available capacity in percent.</p> | DEPENDENT | hadoop.namenode.percent_remaining<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=NameNodeInfo')].PercentRemaining.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | NameNode: Capacity remaining | <p>Available capacity.</p> | DEPENDENT | hadoop.namenode.capacity_remaining<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].CapacityRemaining.first()`</p> |
-| Hadoop | NameNode: Corrupt blocks | <p>Number of corrupt blocks.</p> | DEPENDENT | hadoop.namenode.corrupt_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].CorruptBlocks.first()`</p> |
-| Hadoop | NameNode: Missing blocks | <p>Number of missing blocks.</p> | DEPENDENT | hadoop.namenode.missing_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].MissingBlocks.first()`</p> |
-| Hadoop | NameNode: Failed volumes | <p>Number of failed volumes.</p> | DEPENDENT | hadoop.namenode.volume_failures_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].VolumeFailuresTotal.first()`</p> |
-| Hadoop | NameNode: Alive DataNodes | <p>Count of alive DataNodes.</p> | DEPENDENT | hadoop.namenode.num_live_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].NumLiveDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | NameNode: Dead DataNodes | <p>Count of dead DataNodes.</p> | DEPENDENT | hadoop.namenode.num_dead_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].NumDeadDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | NameNode: Stale DataNodes | <p>DataNodes that do not send a heartbeat within 30 seconds are marked as "stale".</p> | DEPENDENT | hadoop.namenode.num_stale_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].StaleDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | NameNode: Total files | <p>Total count of files tracked by the NameNode.</p> | DEPENDENT | hadoop.namenode.files_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].FilesTotal.first()`</p> |
-| Hadoop | NameNode: Total load | <p>The current number of concurrent file accesses (read/write) across all DataNodes.</p> | DEPENDENT | hadoop.namenode.total_load<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].TotalLoad.first()`</p> |
-| Hadoop | NameNode: Blocks allocable | <p>Maximum number of blocks allocable.</p> | DEPENDENT | hadoop.namenode.block_capacity<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].BlockCapacity.first()`</p> |
-| Hadoop | NameNode: Total blocks | <p>Count of blocks tracked by NameNode.</p> | DEPENDENT | hadoop.namenode.blocks_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].BlocksTotal.first()`</p> |
-| Hadoop | NameNode: Under-replicated blocks | <p>The number of blocks with insufficient replication.</p> | DEPENDENT | hadoop.namenode.under_replicated_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].UnderReplicatedBlocks.first()`</p> |
-| Hadoop | {#HOSTNAME}: RPC queue & processing time | <p>Average time spent on processing RPC requests.</p> | DEPENDENT | hadoop.nodemanager.rpc_processing_time_avg[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=RpcActivityForPort8040')].RpcProcessingTimeAvgTime.first()`</p> |
-| Hadoop | {#HOSTNAME}: Container launch avg duration | | DEPENDENT | hadoop.nodemanager.container_launch_duration_avg[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=NodeManagerMetrics')].ContainerLaunchDurationAvgTime.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Threads | <p>The number of JVM threads.</p> | DEPENDENT | hadoop.nodemanager.jvm.threads[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Threading')].ThreadCount.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Garbage collection time | <p>The JVM garbage collection time in milliseconds.</p> | DEPENDENT | hadoop.nodemanager.jvm.gc_time[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=JvmMetrics')].GcTimeMillis.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Heap usage | <p>The JVM heap usage in MBytes.</p> | DEPENDENT | hadoop.nodemanager.jvm.mem_heap_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=JvmMetrics')].MemHeapUsedM.first()`</p> |
-| Hadoop | {#HOSTNAME}: Uptime | | DEPENDENT | hadoop.nodemanager.uptime[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-| Hadoop | {#HOSTNAME}: State | <p>State of the node - valid values are: NEW, RUNNING, UNHEALTHY, DECOMMISSIONING, DECOMMISSIONED, LOST, REBOOTED, SHUTDOWN.</p> | DEPENDENT | hadoop.nodemanager.state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].State.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | {#HOSTNAME}: Version | | DEPENDENT | hadoop.nodemanager.version[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].NodeManagerVersion.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | {#HOSTNAME}: Number of containers | | DEPENDENT | hadoop.nodemanager.numcontainers[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].NumContainers.first()`</p> |
-| Hadoop | {#HOSTNAME}: Used memory | | DEPENDENT | hadoop.nodemanager.usedmemory[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].UsedMemoryMB.first()`</p> |
-| Hadoop | {#HOSTNAME}: Available memory | | DEPENDENT | hadoop.nodemanager.availablememory[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].AvailableMemoryMB.first()`</p> |
-| Hadoop | {#HOSTNAME}: Remaining | <p>Remaining disk space.</p> | DEPENDENT | hadoop.datanode.remaining[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].Remaining.first()`</p> |
-| Hadoop | {#HOSTNAME}: Used | <p>Used disk space.</p> | DEPENDENT | hadoop.datanode.dfs_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].DfsUsed.first()`</p> |
-| Hadoop | {#HOSTNAME}: Number of failed volumes | <p>Number of failed storage volumes.</p> | DEPENDENT | hadoop.datanode.numfailedvolumes[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].NumFailedVolumes.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Threads | <p>The number of JVM threads.</p> | DEPENDENT | hadoop.datanode.jvm.threads[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Threading')].ThreadCount.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Garbage collection time | <p>The JVM garbage collection time in milliseconds.</p> | DEPENDENT | hadoop.datanode.jvm.gc_time[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=JvmMetrics')].GcTimeMillis.first()`</p> |
-| Hadoop | {#HOSTNAME}: JVM Heap usage | <p>The JVM heap usage in MBytes.</p> | DEPENDENT | hadoop.datanode.jvm.mem_heap_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=JvmMetrics')].MemHeapUsedM.first()`</p> |
-| Hadoop | {#HOSTNAME}: Uptime | | DEPENDENT | hadoop.datanode.uptime[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-| Hadoop | {#HOSTNAME}: Version | <p>DataNode software version.</p> | DEPENDENT | hadoop.datanode.version[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].version.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | {#HOSTNAME}: Admin state | <p>Administrative state.</p> | DEPENDENT | hadoop.datanode.admin_state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].adminState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Hadoop | {#HOSTNAME}: Oper state | <p>Operational state.</p> | DEPENDENT | hadoop.datanode.oper_state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].operState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-| Zabbix_raw_items | Get ResourceManager stats | <p>-</p> | HTTP_AGENT | hadoop.resourcemanager.get |
-| Zabbix_raw_items | Get NameNode stats | <p>-</p> | HTTP_AGENT | hadoop.namenode.get |
-| Zabbix_raw_items | Get NodeManagers states | <p>-</p> | HTTP_AGENT | hadoop.nodemanagers.get<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(JSON.parse(value).beans[0].LiveNodeManagers))`</p> |
-| Zabbix_raw_items | Get DataNodes states | <p>-</p> | HTTP_AGENT | hadoop.datanodes.get<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Zabbix_raw_items | Hadoop NodeManager {#HOSTNAME}: Get stats | | HTTP_AGENT | hadoop.nodemanager.get[{#HOSTNAME}] |
-| Zabbix_raw_items | Hadoop DataNode {#HOSTNAME}: Get stats | | HTTP_AGENT | hadoop.datanode.get[{#HOSTNAME}] |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Hadoop |ResourceManager: Service status |<p>Hadoop ResourceManager API port availability.</p> |SIMPLE |net.tcp.service["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Hadoop |ResourceManager: Service response time |<p>Hadoop ResourceManager API performance.</p> |SIMPLE |net.tcp.service.perf["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"] |
+|Hadoop |ResourceManager: Uptime |<p>-</p> |DEPENDENT |hadoop.resourcemanager.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
+|Hadoop |ResourceManager: RPC queue & processing time |<p>Average time spent on processing RPC requests.</p> |DEPENDENT |hadoop.resourcemanager.rpc_processing_time_avg<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=RpcActivityForPort8031')].RpcProcessingTimeAvgTime.first()`</p> |
+|Hadoop |ResourceManager: Active NMs |<p>Number of Active NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_active_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumActiveNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |ResourceManager: Decommissioning NMs |<p>Number of Decommissioning NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_decommissioning_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumDecommissioningNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |ResourceManager: Decommissioned NMs |<p>Number of Decommissioned NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_decommissioned_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumDecommissionedNMs.first()`</p> |
+|Hadoop |ResourceManager: Lost NMs |<p>Number of Lost NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_lost_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumLostNMs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |ResourceManager: Unhealthy NMs |<p>Number of Unhealthy NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_unhealthy_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumUnhealthyNMs.first()`</p> |
+|Hadoop |ResourceManager: Rebooted NMs |<p>Number of Rebooted NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_rebooted_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumRebootedNMs.first()`</p> |
+|Hadoop |ResourceManager: Shutdown NMs |<p>Number of Shutdown NodeManagers.</p> |DEPENDENT |hadoop.resourcemanager.num_shutdown_nm<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=ResourceManager,name=ClusterMetrics')].NumShutdownNMs.first()`</p> |
+|Hadoop |NameNode: Service status |<p>Hadoop NameNode API port availability.</p> |SIMPLE |net.tcp.service["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Hadoop |NameNode: Service response time |<p>Hadoop NameNode API performance.</p> |SIMPLE |net.tcp.service.perf["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"] |
+|Hadoop |NameNode: Uptime |<p>-</p> |DEPENDENT |hadoop.namenode.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
+|Hadoop |NameNode: RPC queue & processing time |<p>Average time spent on processing RPC requests.</p> |DEPENDENT |hadoop.namenode.rpc_processing_time_avg<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=RpcActivityForPort9000')].RpcProcessingTimeAvgTime.first()`</p> |
+|Hadoop |NameNode: Block Pool Renaming |<p>-</p> |DEPENDENT |hadoop.namenode.percent_block_pool_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=NameNodeInfo')].PercentBlockPoolUsed.first()`</p> |
+|Hadoop |NameNode: Transactions since last checkpoint |<p>Total number of transactions since last checkpoint.</p> |DEPENDENT |hadoop.namenode.transactions_since_last_checkpoint<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].TransactionsSinceLastCheckpoint.first()`</p> |
+|Hadoop |NameNode: Percent capacity remaining |<p>Available capacity in percent.</p> |DEPENDENT |hadoop.namenode.percent_remaining<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=NameNodeInfo')].PercentRemaining.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |NameNode: Capacity remaining |<p>Available capacity.</p> |DEPENDENT |hadoop.namenode.capacity_remaining<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].CapacityRemaining.first()`</p> |
+|Hadoop |NameNode: Corrupt blocks |<p>Number of corrupt blocks.</p> |DEPENDENT |hadoop.namenode.corrupt_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].CorruptBlocks.first()`</p> |
+|Hadoop |NameNode: Missing blocks |<p>Number of missing blocks.</p> |DEPENDENT |hadoop.namenode.missing_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].MissingBlocks.first()`</p> |
+|Hadoop |NameNode: Failed volumes |<p>Number of failed volumes.</p> |DEPENDENT |hadoop.namenode.volume_failures_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].VolumeFailuresTotal.first()`</p> |
+|Hadoop |NameNode: Alive DataNodes |<p>Count of alive DataNodes.</p> |DEPENDENT |hadoop.namenode.num_live_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].NumLiveDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |NameNode: Dead DataNodes |<p>Count of dead DataNodes.</p> |DEPENDENT |hadoop.namenode.num_dead_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].NumDeadDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |NameNode: Stale DataNodes |<p>DataNodes that do not send a heartbeat within 30 seconds are marked as "stale".</p> |DEPENDENT |hadoop.namenode.num_stale_data_nodes<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].StaleDataNodes.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |NameNode: Total files |<p>Total count of files tracked by the NameNode.</p> |DEPENDENT |hadoop.namenode.files_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].FilesTotal.first()`</p> |
+|Hadoop |NameNode: Total load |<p>The current number of concurrent file accesses (read/write) across all DataNodes.</p> |DEPENDENT |hadoop.namenode.total_load<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].TotalLoad.first()`</p> |
+|Hadoop |NameNode: Blocks allocable |<p>Maximum number of blocks allocable.</p> |DEPENDENT |hadoop.namenode.block_capacity<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].BlockCapacity.first()`</p> |
+|Hadoop |NameNode: Total blocks |<p>Count of blocks tracked by NameNode.</p> |DEPENDENT |hadoop.namenode.blocks_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].BlocksTotal.first()`</p> |
+|Hadoop |NameNode: Under-replicated blocks |<p>The number of blocks with insufficient replication.</p> |DEPENDENT |hadoop.namenode.under_replicated_blocks<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NameNode,name=FSNamesystem')].UnderReplicatedBlocks.first()`</p> |
+|Hadoop |{#HOSTNAME}: RPC queue & processing time |<p>Average time spent on processing RPC requests.</p> |DEPENDENT |hadoop.nodemanager.rpc_processing_time_avg[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=RpcActivityForPort8040')].RpcProcessingTimeAvgTime.first()`</p> |
+|Hadoop |{#HOSTNAME}: Container launch avg duration |<p>-</p> |DEPENDENT |hadoop.nodemanager.container_launch_duration_avg[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=NodeManagerMetrics')].ContainerLaunchDurationAvgTime.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Threads |<p>The number of JVM threads.</p> |DEPENDENT |hadoop.nodemanager.jvm.threads[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Threading')].ThreadCount.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Garbage collection time |<p>The JVM garbage collection time in milliseconds.</p> |DEPENDENT |hadoop.nodemanager.jvm.gc_time[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=JvmMetrics')].GcTimeMillis.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Heap usage |<p>The JVM heap usage in MBytes.</p> |DEPENDENT |hadoop.nodemanager.jvm.mem_heap_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=NodeManager,name=JvmMetrics')].MemHeapUsedM.first()`</p> |
+|Hadoop |{#HOSTNAME}: Uptime |<p>-</p> |DEPENDENT |hadoop.nodemanager.uptime[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
+|Hadoop |{#HOSTNAME}: State |<p>State of the node - valid values are: NEW, RUNNING, UNHEALTHY, DECOMMISSIONING, DECOMMISSIONED, LOST, REBOOTED, SHUTDOWN.</p> |DEPENDENT |hadoop.nodemanager.state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].State.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |{#HOSTNAME}: Version |<p>-</p> |DEPENDENT |hadoop.nodemanager.version[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].NodeManagerVersion.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |{#HOSTNAME}: Number of containers |<p>-</p> |DEPENDENT |hadoop.nodemanager.numcontainers[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].NumContainers.first()`</p> |
+|Hadoop |{#HOSTNAME}: Used memory |<p>-</p> |DEPENDENT |hadoop.nodemanager.usedmemory[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].UsedMemoryMB.first()`</p> |
+|Hadoop |{#HOSTNAME}: Available memory |<p>-</p> |DEPENDENT |hadoop.nodemanager.availablememory[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.HostName=='{#HOSTNAME}')].AvailableMemoryMB.first()`</p> |
+|Hadoop |{#HOSTNAME}: Remaining |<p>Remaining disk space.</p> |DEPENDENT |hadoop.datanode.remaining[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].Remaining.first()`</p> |
+|Hadoop |{#HOSTNAME}: Used |<p>Used disk space.</p> |DEPENDENT |hadoop.datanode.dfs_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].DfsUsed.first()`</p> |
+|Hadoop |{#HOSTNAME}: Number of failed volumes |<p>Number of failed storage volumes.</p> |DEPENDENT |hadoop.datanode.numfailedvolumes[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=FSDatasetState')].NumFailedVolumes.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Threads |<p>The number of JVM threads.</p> |DEPENDENT |hadoop.datanode.jvm.threads[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Threading')].ThreadCount.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Garbage collection time |<p>The JVM garbage collection time in milliseconds.</p> |DEPENDENT |hadoop.datanode.jvm.gc_time[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=JvmMetrics')].GcTimeMillis.first()`</p> |
+|Hadoop |{#HOSTNAME}: JVM Heap usage |<p>The JVM heap usage in MBytes.</p> |DEPENDENT |hadoop.datanode.jvm.mem_heap_used[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='Hadoop:service=DataNode,name=JvmMetrics')].MemHeapUsedM.first()`</p> |
+|Hadoop |{#HOSTNAME}: Uptime |<p>-</p> |DEPENDENT |hadoop.datanode.uptime[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.beans[?(@.name=='java.lang:type=Runtime')].Uptime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
+|Hadoop |{#HOSTNAME}: Version |<p>DataNode software version.</p> |DEPENDENT |hadoop.datanode.version[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].version.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |{#HOSTNAME}: Admin state |<p>Administrative state.</p> |DEPENDENT |hadoop.datanode.admin_state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].adminState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hadoop |{#HOSTNAME}: Oper state |<p>Operational state.</p> |DEPENDENT |hadoop.datanode.oper_state[{#HOSTNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.HostName=='{#HOSTNAME}')].operState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Zabbix_raw_items |Get ResourceManager stats |<p>-</p> |HTTP_AGENT |hadoop.resourcemanager.get |
+|Zabbix_raw_items |Get NameNode stats |<p>-</p> |HTTP_AGENT |hadoop.namenode.get |
+|Zabbix_raw_items |Get NodeManagers states |<p>-</p> |HTTP_AGENT |hadoop.nodemanagers.get<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(JSON.parse(value).beans[0].LiveNodeManagers))`</p> |
+|Zabbix_raw_items |Get DataNodes states |<p>-</p> |HTTP_AGENT |hadoop.datanodes.get<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Zabbix_raw_items |Hadoop NodeManager {#HOSTNAME}: Get stats |<p>-</p> |HTTP_AGENT |hadoop.nodemanager.get[{#HOSTNAME}] |
+|Zabbix_raw_items |Hadoop DataNode {#HOSTNAME}: Get stats |<p>-</p> |HTTP_AGENT |hadoop.datanode.get[{#HOSTNAME}] |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------------------------------------------------------------------------------------------------|
-| ResourceManager: Service is unavailable | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| ResourceManager: Service response time is too high (over {$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN} for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"].min(5m)}>{$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- ResourceManager: Service is unavailable</p> |
-| ResourceManager: Service has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:hadoop.resourcemanager.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| ResourceManager: Failed to fetch ResourceManager API page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:hadoop.resourcemanager.uptime.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- ResourceManager: Service is unavailable</p> |
-| ResourceManager: Cluster has no active NodeManagers | <p>Cluster is unable to execute any jobs without at least one NodeManager.</p> | `{TEMPLATE_NAME:hadoop.resourcemanager.num_active_nm.max(5m)}=0` | HIGH | |
-| ResourceManager: Cluster has unhealthy NodeManagers | <p>YARN considers any node with disk utilization exceeding the value specified under the property yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage (in yarn-site.xml) to be unhealthy. Ample disk space is critical to ensure uninterrupted operation of a Hadoop cluster, and large numbers of unhealthyNodes (the number to alert on depends on the size of your cluster) should be quickly investigated and resolved.</p> | `{TEMPLATE_NAME:hadoop.resourcemanager.num_unhealthy_nm.min(15m)}>0` | AVERAGE | |
-| NameNode: Service is unavailable | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| NameNode: Service response time is too high (over {$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN} for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"].min(5m)}>{$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- NameNode: Service is unavailable</p> |
-| NameNode: Service has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:hadoop.namenode.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| NameNode: Failed to fetch NameNode API page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:hadoop.namenode.uptime.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- NameNode: Service is unavailable</p> |
-| NameNode: Cluster capacity remaining is low (below {$HADOOP.CAPACITY_REMAINING.MIN.WARN}% for 15m) | <p>A good practice is to ensure that disk use never exceeds 80 percent capacity.</p> | `{TEMPLATE_NAME:hadoop.namenode.percent_remaining.max(15m)}<{$HADOOP.CAPACITY_REMAINING.MIN.WARN}` | WARNING | |
-| NameNode: Cluster has missing blocks | <p>A missing block is far worse than a corrupt block, because a missing block cannot be recovered by copying a replica.</p> | `{TEMPLATE_NAME:hadoop.namenode.missing_blocks.min(15m)}>0` | AVERAGE | |
-| NameNode: Cluster has volume failures | <p>HDFS now allows for disks to fail in place, without affecting DataNode operations, until a threshold value is reached. This is set on each DataNode via the dfs.datanode.failed.volumes.tolerated property; it defaults to 0, meaning that any volume failure will shut down the DataNode; on a production cluster where DataNodes typically have 6, 8, or 12 disks, setting this parameter to 1 or 2 is typically the best practice.</p> | `{TEMPLATE_NAME:hadoop.namenode.volume_failures_total.min(15m)}>0` | AVERAGE | |
-| NameNode: Cluster has DataNodes in Dead state | <p>The death of a DataNode causes a flurry of network activity, as the NameNode initiates replication of blocks lost on the dead nodes.</p> | `{TEMPLATE_NAME:hadoop.namenode.num_dead_data_nodes.min(5m)}>0` | AVERAGE | |
-| {#HOSTNAME}: Service has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:hadoop.nodemanager.uptime[{#HOSTNAME}].last()}<10m` | INFO | <p>Manual close: YES</p> |
-| {#HOSTNAME}: Failed to fetch NodeManager API page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:hadoop.nodemanager.uptime[{#HOSTNAME}].nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- {#HOSTNAME}: NodeManager has state {ITEM.VALUE}.</p> |
-| {#HOSTNAME}: NodeManager has state {ITEM.VALUE}. | <p>The state is different from normal.</p> | `{TEMPLATE_NAME:hadoop.nodemanager.state[{#HOSTNAME}].last()}<>"RUNNING"` | AVERAGE | |
-| {#HOSTNAME}: Service has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:hadoop.datanode.uptime[{#HOSTNAME}].last()}<10m` | INFO | <p>Manual close: YES</p> |
-| {#HOSTNAME}: Failed to fetch DataNode API page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:hadoop.datanode.uptime[{#HOSTNAME}].nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- {#HOSTNAME}: DataNode has state {ITEM.VALUE}.</p> |
-| {#HOSTNAME}: DataNode has state {ITEM.VALUE}. | <p>The state is different from normal.</p> | `{TEMPLATE_NAME:hadoop.datanode.oper_state[{#HOSTNAME}].last()}<>"Live"` | AVERAGE | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|ResourceManager: Service is unavailable |<p>-</p> |`last(/Hadoop by HTTP/net.tcp.service["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|ResourceManager: Service response time is too high (over {$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/Hadoop by HTTP/net.tcp.service.perf["tcp","{$HADOOP.RESOURCEMANAGER.HOST}","{$HADOOP.RESOURCEMANAGER.PORT}"],5m)>{$HADOOP.RESOURCEMANAGER.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- ResourceManager: Service is unavailable</p> |
+|ResourceManager: Service has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes.</p> |`last(/Hadoop by HTTP/hadoop.resourcemanager.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|ResourceManager: Failed to fetch ResourceManager API page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Hadoop by HTTP/hadoop.resourcemanager.uptime,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- ResourceManager: Service is unavailable</p> |
+|ResourceManager: Cluster has no active NodeManagers |<p>Cluster is unable to execute any jobs without at least one NodeManager.</p> |`max(/Hadoop by HTTP/hadoop.resourcemanager.num_active_nm,5m)=0` |HIGH | |
+|ResourceManager: Cluster has unhealthy NodeManagers |<p>YARN considers any node with disk utilization exceeding the value specified under the property yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage (in yarn-site.xml) to be unhealthy. Ample disk space is critical to ensure uninterrupted operation of a Hadoop cluster, and large numbers of unhealthy nodes (the number to alert on depends on the size of your cluster) should be quickly investigated and resolved.</p> |`min(/Hadoop by HTTP/hadoop.resourcemanager.num_unhealthy_nm,15m)>0` |AVERAGE | |
+|NameNode: Service is unavailable |<p>-</p> |`last(/Hadoop by HTTP/net.tcp.service["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|NameNode: Service response time is too high (over {$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/Hadoop by HTTP/net.tcp.service.perf["tcp","{$HADOOP.NAMENODE.HOST}","{$HADOOP.NAMENODE.PORT}"],5m)>{$HADOOP.NAMENODE.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- NameNode: Service is unavailable</p> |
+|NameNode: Service has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes.</p> |`last(/Hadoop by HTTP/hadoop.namenode.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|NameNode: Failed to fetch NameNode API page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Hadoop by HTTP/hadoop.namenode.uptime,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- NameNode: Service is unavailable</p> |
+|NameNode: Cluster capacity remaining is low (below {$HADOOP.CAPACITY_REMAINING.MIN.WARN}% for 15m) |<p>A good practice is to ensure that disk use never exceeds 80 percent capacity.</p> |`max(/Hadoop by HTTP/hadoop.namenode.percent_remaining,15m)<{$HADOOP.CAPACITY_REMAINING.MIN.WARN}` |WARNING | |
+|NameNode: Cluster has missing blocks |<p>A missing block is far worse than a corrupt block, because a missing block cannot be recovered by copying a replica.</p> |`min(/Hadoop by HTTP/hadoop.namenode.missing_blocks,15m)>0` |AVERAGE | |
+|NameNode: Cluster has volume failures |<p>HDFS now allows for disks to fail in place, without affecting DataNode operations, until a threshold value is reached. This is set on each DataNode via the dfs.datanode.failed.volumes.tolerated property; it defaults to 0, meaning that any volume failure will shut down the DataNode; on a production cluster where DataNodes typically have 6, 8, or 12 disks, setting this parameter to 1 or 2 is typically the best practice.</p> |`min(/Hadoop by HTTP/hadoop.namenode.volume_failures_total,15m)>0` |AVERAGE | |
+|NameNode: Cluster has DataNodes in Dead state |<p>The death of a DataNode causes a flurry of network activity, as the NameNode initiates replication of blocks lost on the dead nodes.</p> |`min(/Hadoop by HTTP/hadoop.namenode.num_dead_data_nodes,5m)>0` |AVERAGE | |
+|{#HOSTNAME}: Service has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes.</p> |`last(/Hadoop by HTTP/hadoop.nodemanager.uptime[{#HOSTNAME}])<10m` |INFO |<p>Manual close: YES</p> |
+|{#HOSTNAME}: Failed to fetch NodeManager API page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Hadoop by HTTP/hadoop.nodemanager.uptime[{#HOSTNAME}],30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- {#HOSTNAME}: NodeManager has state {ITEM.VALUE}.</p> |
+|{#HOSTNAME}: NodeManager has state {ITEM.VALUE}. |<p>The state is different from normal.</p> |`last(/Hadoop by HTTP/hadoop.nodemanager.state[{#HOSTNAME}])<>"RUNNING"` |AVERAGE | |
+|{#HOSTNAME}: Service has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes.</p> |`last(/Hadoop by HTTP/hadoop.datanode.uptime[{#HOSTNAME}])<10m` |INFO |<p>Manual close: YES</p> |
+|{#HOSTNAME}: Failed to fetch DataNode API page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Hadoop by HTTP/hadoop.datanode.uptime[{#HOSTNAME}],30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- {#HOSTNAME}: DataNode has state {ITEM.VALUE}.</p> |
+|{#HOSTNAME}: DataNode has state {ITEM.VALUE}. |<p>The state is different from normal.</p> |`last(/Hadoop by HTTP/hadoop.datanode.oper_state[{#HOSTNAME}])<>"Live"` |AVERAGE | |
## Feedback
diff --git a/templates/app/hadoop_http/template_app_hadoop_http.yaml b/templates/app/hadoop_http/template_app_hadoop_http.yaml
index b1c68a7034d..47b0f232f21 100644
--- a/templates/app/hadoop_http/template_app_hadoop_http.yaml
+++ b/templates/app/hadoop_http/template_app_hadoop_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:24Z'
+ date: '2021-12-19T15:19:38Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -15,7 +15,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/413459-discussion-thread-for-official-zabbix-template-hadoop
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/haproxy_agent/README.md b/templates/app/haproxy_agent/README.md
index a0184c4d160..fe8d105633a 100644
--- a/templates/app/haproxy_agent/README.md
+++ b/templates/app/haproxy_agent/README.md
@@ -14,7 +14,6 @@ Note that this solution supports https and redirects.
This template was tested on:
- HAProxy, version 1.8
-- Zabbix, version 5.4
## Setup
@@ -66,9 +65,9 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Backend discovery |<p>Discovery backends</p> |DEPENDENT |haproxy.backend.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} MATCHES_REGEX `BACKEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
-|Frontend discovery |<p>Discovery frontends</p> |DEPENDENT |haproxy.frontend.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} MATCHES_REGEX `FRONTEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
-|Server discovery |<p>Discovery servers</p> |DEPENDENT |haproxy.server.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} NOT_MATCHES_REGEX `FRONTEND|BACKEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Backend discovery |<p>Discovery backends</p> |DEPENDENT |haproxy.backend.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} MATCHES_REGEX `BACKEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Frontend discovery |<p>Discovery frontends</p> |DEPENDENT |haproxy.frontend.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} MATCHES_REGEX `FRONTEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Server discovery |<p>Discovery servers</p> |DEPENDENT |haproxy.server.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} NOT_MATCHES_REGEX `FRONTEND|BACKEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
## Items collected
@@ -80,23 +79,23 @@ There are no template links in this template.
|HAProxy |HAProxy: Service response time |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"] |
|HAProxy |HAProxy Backend {#PXNAME}: Status |<p>Possible values:</p><p>UP - The server is reporting as healthy.</p><p>DOWN - The server is reporting as unhealthy and unable to receive requests.</p><p>NOLB - You've added http-check disable-on-404 to the backend and the health checked URL has returned an HTTP 404 response.</p><p>MAINT - The server has been disabled or put into maintenance mode.</p><p>DRAIN - The server has been put into drain mode.</p><p>no check - Health checks are not enabled for this server.</p> |DEPENDENT |haproxy.backend.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Responses time |<p>Average backend response time (in ms) for the last 1,024 requests</p> |DEPENDENT |haproxy.backend.rtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].rtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.backend.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.backend.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Response errors per second |<p>Number of requests whose responses yielded an error</p> |DEPENDENT |haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.backend.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.backend.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Response errors per second |<p>Number of requests whose responses yielded an error</p> |DEPENDENT |haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Unassigned requests |<p>Current number of requests unassigned in queue.</p> |DEPENDENT |haproxy.backend.qcur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qcur.first()`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Time in queue |<p>Average time spent in queue (in ms) for the last 1,024 requests</p> |DEPENDENT |haproxy.backend.qtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.backend.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.backend.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.backend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.backend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.backend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.backend.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.backend.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.backend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.backend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.backend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Number of active servers |<p>Number of active servers.</p> |DEPENDENT |haproxy.backend.act[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].act.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Number of backup servers |<p>Number of backup servers.</p> |DEPENDENT |haproxy.backend.bck[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bck.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.backend.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.backend.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Weight |<p>Total effective weight.</p> |DEPENDENT |haproxy.backend.weight[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].weight.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Status |<p>Possible values: OPEN, STOP.</p><p>When Status is OPEN, the frontend is operating normally and ready to receive traffic.</p> |DEPENDENT |haproxy.frontend.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Requests rate |<p>HTTP requests per second</p> |DEPENDENT |haproxy.frontend.req_rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].req_rate.first()`</p> |
@@ -104,37 +103,37 @@ There are no template links in this template.
|HAProxy |HAProxy Frontend {#PXNAME}: Established sessions |<p>The current number of established sessions.</p> |DEPENDENT |haproxy.frontend.scur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].scur.first()`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Session limits |<p>The most simultaneous sessions that are allowed, as defined by the maxconn setting in the frontend.</p> |DEPENDENT |haproxy.frontend.slim[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].slim.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Session utilization |<p>Percentage of sessions used (scur / slim * 100).</p> |CALCULATED |haproxy.frontend.sutil[{#PXNAME},{#SVNAME}]<p>**Expression**:</p>`last(//haproxy.frontend.scur[{#PXNAME},{#SVNAME}]) / last(//haproxy.frontend.slim[{#PXNAME},{#SVNAME}]) * 100` |
-|HAProxy |HAProxy Frontend {#PXNAME}: Request errors per second |<p>Number of request errors per second.</p> |DEPENDENT |haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].ereq.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Denied requests per second |<p>Requests denied due to security concerns (ACL-restricted) per second.</p> |DEPENDENT |haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dreq.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.frontend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Incoming traffic |<p>Number of bits received by the frontend</p> |DEPENDENT |haproxy.frontend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the frontend</p> |DEPENDENT |haproxy.frontend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Frontend {#PXNAME}: Request errors per second |<p>Number of request errors per second.</p> |DEPENDENT |haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].ereq.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Denied requests per second |<p>Requests denied due to security concerns (ACL-restricted) per second.</p> |DEPENDENT |haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dreq.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.frontend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Incoming traffic |<p>Number of bits received by the frontend</p> |DEPENDENT |haproxy.frontend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the frontend</p> |DEPENDENT |haproxy.frontend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Status | |DEPENDENT |haproxy.server.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses time |<p>Average server response time (in ms) for the last 1,024 requests.</p> |DEPENDENT |haproxy.server.rtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].rtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.server.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.server.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Response errors per second |<p>Number of requests whose responses yielded an error.</p> |DEPENDENT |haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.server.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.server.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Response errors per second |<p>Number of requests whose responses yielded an error.</p> |DEPENDENT |haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Unassigned requests |<p>Current number of requests unassigned in queue.</p> |DEPENDENT |haproxy.server.qcur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qcur.first()`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Time in queue |<p>Average time spent in queue (in ms) for the last 1,024 requests.</p> |DEPENDENT |haproxy.server.qtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.server.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.server.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.server.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.server.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.server.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.server.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.server.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.server.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.server.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.server.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.server.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.server.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.server.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.server.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server is active |<p>Shows whether the server is active (marked with a Y) or a backup (marked with a -).</p> |DEPENDENT |haproxy.server.act[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].act.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server is backup |<p>Shows whether the server is a backup (marked with a Y) or active (marked with a -).</p> |DEPENDENT |haproxy.server.bck[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bck.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.server.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.server.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Weight |<p>Effective weight.</p> |DEPENDENT |haproxy.server.weight[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].weight.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Configured maxqueue |<p>Configured maxqueue for the server, or nothing if the value is 0 (default, meaning no limit).</p> |DEPENDENT |haproxy.server.qlimit[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qlimit.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>- MATCHES_REGEX: `^\d+$`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server was selected per second |<p>Number of times that server was selected.</p> |DEPENDENT |haproxy.server.lbtot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].lbtot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server was selected per second |<p>Number of times that server was selected.</p> |DEPENDENT |haproxy.server.lbtot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].lbtot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Status of last health check |<p>Status of last health check, one of:</p><p>UNK -> unknown</p><p>INI -> initializing</p><p>SOCKERR -> socket error</p><p>L4OK -> check passed on layer 4, no upper layers testing enabled</p><p>L4TOUT -> layer 1-4 timeout</p><p>L4CON -> layer 1-4 connection problem, for example "Connection refused" (tcp rst) or "No route to host" (icmp)</p><p>L6OK -> check passed on layer 6</p><p>L6TOUT -> layer 6 (SSL) timeout</p><p>L6RSP -> layer 6 invalid response - protocol error</p><p>L7OK -> check passed on layer 7</p><p>L7OKC -> check conditionally passed on layer 7, for example 404 with disable-on-404</p><p>L7TOUT -> layer 7 (HTTP/SMTP) timeout</p><p>L7RSP -> layer 7 invalid response - protocol error</p><p>L7STS -> layer 7 response error, for example HTTP 5xx</p><p>Notice: If a check is currently running, the last known status will be reported, prefixed with "* ", e.g. "* L7OK".</p> |DEPENDENT |haproxy.server.check_status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].check_status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|Zabbix_raw_items |HAProxy: Get stats |<p>HAProxy Statistics Report in CSV format</p> |ZABBIX_PASSIVE |web.page.get["{$HAPROXY.STATS.SCHEME}://{HOST.CONN}:{$HAPROXY.STATS.PORT}/{$HAPROXY.STATS.PATH};csv"]<p>**Preprocessing**:</p><p>- REGEX: `# ([\s\S]*) \1`</p><p>- CSV_TO_JSON: ` 1`</p> |
|Zabbix_raw_items |HAProxy: Get nodes |<p>Array for LLD rules.</p> |DEPENDENT |haproxy.get.nodes<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(value),['mode','pxname','svname'])`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
@@ -144,24 +143,24 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|HAProxy: Version has changed (new version: {ITEM.VALUE}) |<p>HAProxy version has changed. Ack to close.</p> |`last(/TEMPLATE_NAME/haproxy.version,#1)<>last(/TEMPLATE_NAME/haproxy.version,#2) and length(last(/TEMPLATE_NAME/haproxy.version))>0` |INFO |<p>Manual close: YES</p> |
-|HAProxy: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/TEMPLATE_NAME/haproxy.uptime)<10m` |INFO |<p>Manual close: YES</p> |
-|HAProxy: Service is down |<p>-</p> |`last(/TEMPLATE_NAME/net.tcp.service["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
-|HAProxy: Service response time is too high (over {$HAPROXY.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/TEMPLATE_NAME/net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"],5m)>{$HAPROXY.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- HAProxy: Service is down</p> |
-|HAProxy backend {#PXNAME}: Server is DOWN |<p>Backend is not available.</p> |`count(/TEMPLATE_NAME/haproxy.backend.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |AVERAGE | |
-|HAProxy backend {#PXNAME}: Average response time is more than {$HAPROXY.BACK_RTIME.MAX.WARN} for 5m |<p>Average backend response time (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_RTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_RTIME.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Number of responses with error is more than {$HAPROXY.BACK_ERESP.MAX.WARN} for 5m |<p>Number of requests on backend, whose responses yielded an error, is more than {$HAPROXY.BACK_ERESP.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_ERESP.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN} for 5m |<p>Current number of requests on backend unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QCUR.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Average time spent in queue is more than {$HAPROXY.BACK_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_QTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QTIME.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m |<p>Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy’s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN} for 5m |<p>Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_EREQ.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Number of requests denied is more than {$HAPROXY.FRONT_DREQ.MAX.WARN} for 5m |<p>Number of requests denied due to security concerns (ACL-restricted) is more than {$HAPROXY.FRONT_DREQ.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_DREQ.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Server is DOWN |<p>Server is not available.</p> |`count(/TEMPLATE_NAME/haproxy.server.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Average response time is more than {$HAPROXY.SERVER_RTIME.MAX.WARN} for 5m |<p>Average server response time (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_RTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_RTIME.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Number of responses with error is more than {$HAPROXY.SERVER_ERESP.MAX.WARN} for 5m |<p>Number of requests on server, whose responses yielded an error, is more than {$HAPROXY.SERVER_ERESP.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_ERESP.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN} for 5m |<p>Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QCUR.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Average time spent in queue is more than {$HAPROXY.SERVER_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_QTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QTIME.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Health check error |<p>Please check the server for faults.</p> |`find(/TEMPLATE_NAME/haproxy.server.check_status[{#PXNAME},{#SVNAME}],#3,"regexp","(?:L[4-7]OK|^$)")=0` |WARNING |<p>**Depends on**:</p><p>- HAProxy {#PXNAME} {#SVNAME}: Server is DOWN</p> |
+|HAProxy: Version has changed (new version: {ITEM.VALUE}) |<p>HAProxy version has changed. Ack to close.</p> |`last(/HAProxy by Zabbix agent/haproxy.version,#1)<>last(/HAProxy by Zabbix agent/haproxy.version,#2) and length(last(/HAProxy by Zabbix agent/haproxy.version))>0` |INFO |<p>Manual close: YES</p> |
+|HAProxy: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/HAProxy by Zabbix agent/haproxy.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|HAProxy: Service is down |<p>-</p> |`last(/HAProxy by Zabbix agent/net.tcp.service["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|HAProxy: Service response time is too high (over {$HAPROXY.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/HAProxy by Zabbix agent/net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"],5m)>{$HAPROXY.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- HAProxy: Service is down</p> |
+|HAProxy backend {#PXNAME}: Server is DOWN |<p>Backend is not available.</p> |`count(/HAProxy by Zabbix agent/haproxy.backend.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |AVERAGE | |
+|HAProxy backend {#PXNAME}: Average response time is more than {$HAPROXY.BACK_RTIME.MAX.WARN} for 5m |<p>Average backend response time (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_RTIME.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.backend.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_RTIME.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Number of responses with error is more than {$HAPROXY.BACK_ERESP.MAX.WARN} for 5m |<p>Number of requests on backend, whose responses yielded an error, is more than {$HAPROXY.BACK_ERESP.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_ERESP.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN} for 5m |<p>Current number of requests on backend unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.backend.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QCUR.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Average time spent in queue is more than {$HAPROXY.BACK_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_QTIME.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.backend.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QTIME.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m |<p>Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy's configuration to allow more sessions, or migrate your HAProxy server to a bigger box.</p> |`min(/HAProxy by Zabbix agent/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN} for 5m |<p>Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_EREQ.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Number of requests denied is more than {$HAPROXY.FRONT_DREQ.MAX.WARN} for 5m |<p>Number of requests denied due to security concerns (ACL-restricted) is more than {$HAPROXY.FRONT_DREQ.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_DREQ.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Server is DOWN |<p>Server is not available.</p> |`count(/HAProxy by Zabbix agent/haproxy.server.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Average response time is more than {$HAPROXY.SERVER_RTIME.MAX.WARN} for 5m |<p>Average server response time (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_RTIME.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.server.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_RTIME.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Number of responses with error is more than {$HAPROXY.SERVER_ERESP.MAX.WARN} for 5m |<p>Number of requests on server, whose responses yielded an error, is more than {$HAPROXY.SERVER_ERESP.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_ERESP.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN} for 5m |<p>Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.server.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QCUR.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Average time spent in queue is more than {$HAPROXY.SERVER_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_QTIME.MAX.WARN}.</p> |`min(/HAProxy by Zabbix agent/haproxy.server.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QTIME.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Health check error |<p>Please check the server for faults.</p> |`find(/HAProxy by Zabbix agent/haproxy.server.check_status[{#PXNAME},{#SVNAME}],#3,"regexp","(?:L[4-7]OK|^$)")=0` |WARNING |<p>**Depends on**:</p><p>- HAProxy {#PXNAME} {#SVNAME}: Server is DOWN</p> |
## Feedback
diff --git a/templates/app/haproxy_agent/template_app_haproxy_agent.yaml b/templates/app/haproxy_agent/template_app_haproxy_agent.yaml
index 0953efcf0b0..f86b603611d 100644
--- a/templates/app/haproxy_agent/template_app_haproxy_agent.yaml
+++ b/templates/app/haproxy_agent/template_app_haproxy_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-08-09T08:38:36Z'
+ date: '2021-12-19T15:19:39Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -22,7 +22,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/393527-discussion-thread-for-official-zabbix-template-haproxy
- Template tooling version used: 0.39
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1706,7 +1706,7 @@ zabbix_export:
expression: 'min(/HAProxy by Zabbix agent/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}'
name: 'HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m'
priority: WARNING
- description: 'Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy’s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.'
+ description: 'Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy''s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.'
graph_prototypes:
-
uuid: 30141ef1c5bf4c4a9cca0f8ce2ea3373
@@ -2911,7 +2911,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2924,7 +2924,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -2933,9 +2933,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2948,7 +2948,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -2957,9 +2957,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2972,7 +2972,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -2988,7 +2988,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3001,7 +3001,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3010,9 +3010,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3025,7 +3025,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3034,9 +3034,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3049,7 +3049,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3058,9 +3058,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '36'
+ 'y': '15'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3073,7 +3073,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3089,7 +3089,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3102,7 +3102,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3111,9 +3111,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3126,7 +3126,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3135,9 +3135,9 @@ zabbix_export:
host: 'HAProxy by Zabbix agent'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3150,7 +3150,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
diff --git a/templates/app/haproxy_http/README.md b/templates/app/haproxy_http/README.md
index 35943681d4e..c9e6c19f297 100644
--- a/templates/app/haproxy_http/README.md
+++ b/templates/app/haproxy_http/README.md
@@ -14,7 +14,6 @@ Note that this solution supports https and redirects.
This template was tested on:
- HAProxy, version 1.8
-- Zabbix, version 5.4
## Setup
@@ -73,9 +72,9 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Backend discovery |<p>Discovery backends</p> |DEPENDENT |haproxy.backend.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} MATCHES_REGEX `BACKEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
-|Frontend discovery |<p>Discovery frontends</p> |DEPENDENT |haproxy.frontend.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} MATCHES_REGEX `FRONTEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
-|Server discovery |<p>Discovery servers</p> |DEPENDENT |haproxy.server.discovery<p>**Filter**:</p>AND <p>- A: {#SVNAME} NOT_MATCHES_REGEX `FRONTEND|BACKEND`</p><p>- B: {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Backend discovery |<p>Discovery backends</p> |DEPENDENT |haproxy.backend.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} MATCHES_REGEX `BACKEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Frontend discovery |<p>Discovery frontends</p> |DEPENDENT |haproxy.frontend.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} MATCHES_REGEX `FRONTEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
+|Server discovery |<p>Discovery servers</p> |DEPENDENT |haproxy.server.discovery<p>**Filter**:</p>AND <p>- {#SVNAME} NOT_MATCHES_REGEX `FRONTEND|BACKEND`</p><p>- {#MODE} MATCHES_REGEX `http|tcp`</p><p>**Overrides:**</p><p>Discard HTTP status codes<br> - {#MODE} MATCHES_REGEX `tcp`<br> - ITEM_PROTOTYPE LIKE `Number of responses with codes` - NO_DISCOVER</p> |
## Items collected
@@ -87,23 +86,23 @@ There are no template links in this template.
|HAProxy |HAProxy: Service response time |<p>-</p> |SIMPLE |net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"] |
|HAProxy |HAProxy Backend {#PXNAME}: Status |<p>Possible values:</p><p>UP - The server is reporting as healthy.</p><p>DOWN - The server is reporting as unhealthy and unable to receive requests.</p><p>NOLB - You've added http-check disable-on-404 to the backend and the health checked URL has returned an HTTP 404 response.</p><p>MAINT - The server has been disabled or put into maintenance mode.</p><p>DRAIN - The server has been put into drain mode.</p><p>no check - Health checks are not enabled for this server.</p> |DEPENDENT |haproxy.backend.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Responses time |<p>Average backend response time (in ms) for the last 1,024 requests</p> |DEPENDENT |haproxy.backend.rtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].rtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.backend.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.backend.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Response errors per second |<p>Number of requests whose responses yielded an error</p> |DEPENDENT |haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.backend.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.backend.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Response errors per second |<p>Number of requests whose responses yielded an error</p> |DEPENDENT |haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Unassigned requests |<p>Current number of requests unassigned in queue.</p> |DEPENDENT |haproxy.backend.qcur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qcur.first()`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Time in queue |<p>Average time spent in queue (in ms) for the last 1,024 requests</p> |DEPENDENT |haproxy.backend.qtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.backend.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.backend.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.backend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.backend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Backend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.backend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.backend.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.backend.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.backend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.backend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.backend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.backend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Backend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.backend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Number of active servers |<p>Number of active servers.</p> |DEPENDENT |haproxy.backend.act[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].act.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Number of backup servers |<p>Number of backup servers.</p> |DEPENDENT |haproxy.backend.bck[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bck.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|HAProxy |HAProxy Backend {#PXNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.backend.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Backend {#PXNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.backend.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy Backend {#PXNAME}: Weight |<p>Total effective weight.</p> |DEPENDENT |haproxy.backend.weight[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].weight.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Status |<p>Possible values: OPEN, STOP.</p><p>When Status is OPEN, the frontend is operating normally and ready to receive traffic.</p> |DEPENDENT |haproxy.frontend.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Requests rate |<p>HTTP requests per second</p> |DEPENDENT |haproxy.frontend.req_rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].req_rate.first()`</p> |
@@ -111,37 +110,37 @@ There are no template links in this template.
|HAProxy |HAProxy Frontend {#PXNAME}: Established sessions |<p>The current number of established sessions.</p> |DEPENDENT |haproxy.frontend.scur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].scur.first()`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Session limits |<p>The most simultaneous sessions that are allowed, as defined by the maxconn setting in the frontend.</p> |DEPENDENT |haproxy.frontend.slim[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].slim.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy Frontend {#PXNAME}: Session utilization |<p>Percentage of sessions used (scur / slim * 100).</p> |CALCULATED |haproxy.frontend.sutil[{#PXNAME},{#SVNAME}]<p>**Expression**:</p>`last(//haproxy.frontend.scur[{#PXNAME},{#SVNAME}]) / last(//haproxy.frontend.slim[{#PXNAME},{#SVNAME}]) * 100` |
-|HAProxy |HAProxy Frontend {#PXNAME}: Request errors per second |<p>Number of request errors per second.</p> |DEPENDENT |haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].ereq.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Denied requests per second |<p>Requests denied due to security concerns (ACL-restricted) per second.</p> |DEPENDENT |haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dreq.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.frontend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Incoming traffic |<p>Number of bits received by the frontend</p> |DEPENDENT |haproxy.frontend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy Frontend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the frontend</p> |DEPENDENT |haproxy.frontend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy Frontend {#PXNAME}: Request errors per second |<p>Number of request errors per second.</p> |DEPENDENT |haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].ereq.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Denied requests per second |<p>Requests denied due to security concerns (ACL-restricted) per second.</p> |DEPENDENT |haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dreq.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.frontend.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.frontend.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.frontend.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Incoming traffic |<p>Number of bits received by the frontend</p> |DEPENDENT |haproxy.frontend.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy Frontend {#PXNAME}: Outgoing traffic |<p>Number of bits sent by the frontend</p> |DEPENDENT |haproxy.frontend.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Status | |DEPENDENT |haproxy.server.status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses time |<p>Average server response time (in ms) for the last 1,024 requests.</p> |DEPENDENT |haproxy.server.rtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].rtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.server.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.server.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Response errors per second |<p>Number of requests whose responses yielded an error.</p> |DEPENDENT |haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Errors connection per second |<p>Number of requests that encountered an error attempting to connect to a backend server.</p> |DEPENDENT |haproxy.server.econ.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].econ.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Responses denied per second |<p>Responses denied due to security concerns (ACL-restricted).</p> |DEPENDENT |haproxy.server.dresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].dresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Response errors per second |<p>Number of requests whose responses yielded an error.</p> |DEPENDENT |haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].eresp.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Unassigned requests |<p>Current number of requests unassigned in queue.</p> |DEPENDENT |haproxy.server.qcur[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qcur.first()`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Time in queue |<p>Average time spent in queue (in ms) for the last 1,024 requests.</p> |DEPENDENT |haproxy.server.qtime[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qtime.first()`</p><p>- MULTIPLIER: `0.001`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.server.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.server.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.server.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.server.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.server.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.server.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.server.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Redispatched requests per second |<p>Number of times a request was redispatched to a different backend.</p> |DEPENDENT |haproxy.server.wredis.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wredis.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Retried connections per second |<p>Number of times a connection was retried.</p> |DEPENDENT |haproxy.server.wretr.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].wretr.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 1xx per second |<p>Number of informational HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_1xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 2xx per second |<p>Number of successful HTTP responses per second.</p> |DEPENDENT |haproxy.server.hrsp_2xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 3xx per second |<p>Number of HTTP redirections per second.</p> |DEPENDENT |haproxy.server.hrsp_3xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 4xx per second |<p>Number of HTTP client errors per second.</p> |DEPENDENT |haproxy.server.hrsp_4xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Number of responses with codes 5xx per second |<p>Number of HTTP server errors per second.</p> |DEPENDENT |haproxy.server.hrsp_5xx.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].hrsp_5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Incoming traffic |<p>Number of bits received by the backend</p> |DEPENDENT |haproxy.server.bin.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bin.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Outgoing traffic |<p>Number of bits sent by the backend</p> |DEPENDENT |haproxy.server.bout.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bout.first()`</p><p>- MULTIPLIER: `8`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server is active |<p>Shows whether the server is active (marked with a Y) or a backup (marked with a -).</p> |DEPENDENT |haproxy.server.act[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].act.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server is backup |<p>Shows whether the server is a backup (marked with a Y) or active (marked with a -).</p> |DEPENDENT |haproxy.server.bck[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].bck.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.server.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Sessions per second |<p>Cumulative number of sessions (end-to-end connections) per second.</p> |DEPENDENT |haproxy.server.stot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].stot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Weight |<p>Effective weight.</p> |DEPENDENT |haproxy.server.weight[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].weight.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Configured maxqueue |<p>Configured maxqueue for the server, or nothing if the value is 0 (default, meaning no limit).</p> |DEPENDENT |haproxy.server.qlimit[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].qlimit.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>- MATCHES_REGEX: `^\d+$`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server was selected per second |<p>Number of times that server was selected.</p> |DEPENDENT |haproxy.server.lbtot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].lbtot.first()`</p><p>- CHANGE_PER_SECOND |
+|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Server was selected per second |<p>Number of times that server was selected.</p> |DEPENDENT |haproxy.server.lbtot.rate[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].lbtot.first()`</p><p>- CHANGE_PER_SECOND</p> |
|HAProxy |HAProxy {#PXNAME} {#SVNAME}: Status of last health check |<p>Status of last health check, one of:</p><p>UNK -> unknown</p><p>INI -> initializing</p><p>SOCKERR -> socket error</p><p>L4OK -> check passed on layer 4, no upper layers testing enabled</p><p>L4TOUT -> layer 1-4 timeout</p><p>L4CON -> layer 1-4 connection problem, for example "Connection refused" (tcp rst) or "No route to host" (icmp)</p><p>L6OK -> check passed on layer 6</p><p>L6TOUT -> layer 6 (SSL) timeout</p><p>L6RSP -> layer 6 invalid response - protocol error</p><p>L7OK -> check passed on layer 7</p><p>L7OKC -> check conditionally passed on layer 7, for example 404 with disable-on-404</p><p>L7TOUT -> layer 7 (HTTP/SMTP) timeout</p><p>L7RSP -> layer 7 invalid response - protocol error</p><p>L7STS -> layer 7 response error, for example HTTP 5xx</p><p>Notice: If a check is currently running, the last known status will be reported, prefixed with "* ". e. g. "* L7OK".</p> |DEPENDENT |haproxy.server.check_status[{#PXNAME},{#SVNAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.pxname == '{#PXNAME}' && @.svname == '{#SVNAME}')].check_status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|Zabbix_raw_items |HAProxy: Get stats |<p>HAProxy Statistics Report in CSV format</p> |HTTP_AGENT |haproxy.get<p>**Preprocessing**:</p><p>- REGEX: `# ([\s\S]*)\n \1`</p><p>- CSV_TO_JSON: ` 1`</p> |
|Zabbix_raw_items |HAProxy: Get nodes |<p>Array for LLD rules.</p> |DEPENDENT |haproxy.get.nodes<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(value),['mode','pxname','svname'])`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
@@ -151,24 +150,24 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|HAProxy: Version has changed (new version: {ITEM.VALUE}) |<p>HAProxy version has changed. Ack to close.</p> |`last(/TEMPLATE_NAME/haproxy.version,#1)<>last(/TEMPLATE_NAME/haproxy.version,#2) and length(last(/TEMPLATE_NAME/haproxy.version))>0` |INFO |<p>Manual close: YES</p> |
-|HAProxy: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/TEMPLATE_NAME/haproxy.uptime)<10m` |INFO |<p>Manual close: YES</p> |
-|HAProxy: Service is down |<p>-</p> |`last(/TEMPLATE_NAME/net.tcp.service["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
-|HAProxy: Service response time is too high (over {$HAPROXY.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/TEMPLATE_NAME/net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"],5m)>{$HAPROXY.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- HAProxy: Service is down</p> |
-|HAProxy backend {#PXNAME}: Server is DOWN |<p>Backend is not available.</p> |`count(/TEMPLATE_NAME/haproxy.backend.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |AVERAGE | |
-|HAProxy backend {#PXNAME}: Average response time is more than {$HAPROXY.BACK_RTIME.MAX.WARN} for 5m |<p>Average backend response time (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_RTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_RTIME.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Number of responses with error is more than {$HAPROXY.BACK_ERESP.MAX.WARN} for 5m |<p>Number of requests on backend, whose responses yielded an error, is more than {$HAPROXY.BACK_ERESP.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_ERESP.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN} for 5m |<p>Current number of requests on backend unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QCUR.MAX.WARN}` |WARNING | |
-|HAProxy backend {#PXNAME}: Average time spent in queue is more than {$HAPROXY.BACK_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_QTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.backend.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QTIME.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m |<p>Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy’s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN} for 5m |<p>Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_EREQ.MAX.WARN}` |WARNING | |
-|HAProxy frontend {#PXNAME}: Number of requests denied is more than {$HAPROXY.FRONT_DREQ.MAX.WARN} for 5m |<p>Number of requests denied due to security concerns (ACL-restricted) is more than {$HAPROXY.FRONT_DREQ.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_DREQ.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Server is DOWN |<p>Server is not available.</p> |`count(/TEMPLATE_NAME/haproxy.server.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Average response time is more than {$HAPROXY.SERVER_RTIME.MAX.WARN} for 5m |<p>Average server response time (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_RTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_RTIME.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Number of responses with error is more than {$HAPROXY.SERVER_ERESP.MAX.WARN} for 5m |<p>Number of requests on server, whose responses yielded an error, is more than {$HAPROXY.SERVER_ERESP.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_ERESP.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN} for 5m |<p>Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QCUR.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Average time spent in queue is more than {$HAPROXY.SERVER_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_QTIME.MAX.WARN}.</p> |`min(/TEMPLATE_NAME/haproxy.server.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QTIME.MAX.WARN}` |WARNING | |
-|HAProxy {#PXNAME} {#SVNAME}: Health check error |<p>Please check the server for faults.</p> |`find(/TEMPLATE_NAME/haproxy.server.check_status[{#PXNAME},{#SVNAME}],#3,"regexp","(?:L[4-7]OK|^$)")=0` |WARNING |<p>**Depends on**:</p><p>- HAProxy {#PXNAME} {#SVNAME}: Server is DOWN</p> |
+|HAProxy: Version has changed (new version: {ITEM.VALUE}) |<p>HAProxy version has changed. Ack to close.</p> |`last(/HAProxy by HTTP/haproxy.version,#1)<>last(/HAProxy by HTTP/haproxy.version,#2) and length(last(/HAProxy by HTTP/haproxy.version))>0` |INFO |<p>Manual close: YES</p> |
+|HAProxy: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/HAProxy by HTTP/haproxy.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|HAProxy: Service is down |<p>-</p> |`last(/HAProxy by HTTP/net.tcp.service["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|HAProxy: Service response time is too high (over {$HAPROXY.RESPONSE_TIME.MAX.WARN} for 5m) |<p>-</p> |`min(/HAProxy by HTTP/net.tcp.service.perf["{$HAPROXY.STATS.SCHEME}","{HOST.CONN}","{$HAPROXY.STATS.PORT}"],5m)>{$HAPROXY.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- HAProxy: Service is down</p> |
+|HAProxy backend {#PXNAME}: Server is DOWN |<p>Backend is not available.</p> |`count(/HAProxy by HTTP/haproxy.backend.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |AVERAGE | |
+|HAProxy backend {#PXNAME}: Average response time is more than {$HAPROXY.BACK_RTIME.MAX.WARN} for 5m |<p>Average backend response time (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_RTIME.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.backend.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_RTIME.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Number of responses with error is more than {$HAPROXY.BACK_ERESP.MAX.WARN} for 5m |<p>Number of requests on backend, whose responses yielded an error, is more than {$HAPROXY.BACK_ERESP.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.backend.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_ERESP.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN} for 5m |<p>Current number of requests on backend unassigned in queue is more than {$HAPROXY.BACK_QCUR.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.backend.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QCUR.MAX.WARN}` |WARNING | |
+|HAProxy backend {#PXNAME}: Average time spent in queue is more than {$HAPROXY.BACK_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.BACK_QTIME.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.backend.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.BACK_QTIME.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m |<p>Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy's configuration to allow more sessions, or migrate your HAProxy server to a bigger box.</p> |`min(/HAProxy by HTTP/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN} for 5m |<p>Number of request errors is more than {$HAPROXY.FRONT_EREQ.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.frontend.ereq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_EREQ.MAX.WARN}` |WARNING | |
+|HAProxy frontend {#PXNAME}: Number of requests denied is more than {$HAPROXY.FRONT_DREQ.MAX.WARN} for 5m |<p>Number of requests denied due to security concerns (ACL-restricted) is more than {$HAPROXY.FRONT_DREQ.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.frontend.dreq.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_DREQ.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Server is DOWN |<p>Server is not available.</p> |`count(/HAProxy by HTTP/haproxy.server.status[{#PXNAME},{#SVNAME}],#5,"eq","DOWN")=5` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Average response time is more than {$HAPROXY.SERVER_RTIME.MAX.WARN} for 5m |<p>Average server response time (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_RTIME.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.server.rtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_RTIME.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Number of responses with error is more than {$HAPROXY.SERVER_ERESP.MAX.WARN} for 5m |<p>Number of requests on server, whose responses yielded an error, is more than {$HAPROXY.SERVER_ERESP.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.server.eresp.rate[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_ERESP.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN} for 5m |<p>Current number of requests unassigned in queue is more than {$HAPROXY.SERVER_QCUR.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.server.qcur[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QCUR.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Average time spent in queue is more than {$HAPROXY.SERVER_QTIME.MAX.WARN} for 5m |<p>Average time spent in queue (in ms) for the last 1,024 requests is more than {$HAPROXY.SERVER_QTIME.MAX.WARN}.</p> |`min(/HAProxy by HTTP/haproxy.server.qtime[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.SERVER_QTIME.MAX.WARN}` |WARNING | |
+|HAProxy {#PXNAME} {#SVNAME}: Health check error |<p>Please check the server for faults.</p> |`find(/HAProxy by HTTP/haproxy.server.check_status[{#PXNAME},{#SVNAME}],#3,"regexp","(?:L[4-7]OK|^$)")=0` |WARNING |<p>**Depends on**:</p><p>- HAProxy {#PXNAME} {#SVNAME}: Server is DOWN</p> |
## Feedback
diff --git a/templates/app/haproxy_http/template_app_haproxy_http.yaml b/templates/app/haproxy_http/template_app_haproxy_http.yaml
index 954cefc7f3b..5ac09ac3312 100644
--- a/templates/app/haproxy_http/template_app_haproxy_http.yaml
+++ b/templates/app/haproxy_http/template_app_haproxy_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-08-09T08:38:38Z'
+ date: '2021-12-19T15:19:40Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -23,7 +23,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/393527-discussion-thread-for-official-zabbix-template-haproxy
- Template tooling version used: 0.39
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1721,7 +1721,7 @@ zabbix_export:
expression: 'min(/HAProxy by HTTP/haproxy.frontend.sutil[{#PXNAME},{#SVNAME}],5m)>{$HAPROXY.FRONT_SUTIL.MAX.WARN}'
name: 'HAProxy frontend {#PXNAME}: Session utilization is more than {$HAPROXY.FRONT_SUTIL.MAX.WARN}% for 5m'
priority: WARNING
- description: 'Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy’s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.'
+ description: 'Alerting on this metric is essential to ensure your server has sufficient capacity to handle all concurrent sessions. Unlike requests, upon reaching the session limit HAProxy will deny additional clients until resource consumption drops. Furthermore, if you find your session usage percentage to be hovering above 80%, it could be time to either modify HAProxy''s configuration to allow more sessions, or migrate your HAProxy server to a bigger box.'
graph_prototypes:
-
uuid: 29c858f57e5447758c9bb39008ba7aa9
@@ -2934,7 +2934,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2947,7 +2947,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -2956,9 +2956,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2971,7 +2971,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -2980,9 +2980,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -2995,7 +2995,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3011,7 +3011,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3024,7 +3024,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3033,9 +3033,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3048,7 +3048,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3057,9 +3057,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3072,7 +3072,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3081,9 +3081,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '36'
+ 'y': '15'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3096,7 +3096,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3112,7 +3112,7 @@ zabbix_export:
-
type: GRAPH_PROTOTYPE
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3125,7 +3125,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3134,9 +3134,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '12'
+ 'y': '5'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3149,7 +3149,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
@@ -3158,9 +3158,9 @@ zabbix_export:
host: 'HAProxy by HTTP'
-
type: GRAPH_PROTOTYPE
- 'y': '24'
+ 'y': '10'
width: '24'
- height: '12'
+ height: '5'
fields:
-
type: INTEGER
@@ -3173,7 +3173,7 @@ zabbix_export:
-
type: INTEGER
name: rows
- value: '3'
+ value: '1'
-
type: GRAPH_PROTOTYPE
name: graphid
diff --git a/templates/app/iis_agent/README.md b/templates/app/iis_agent/README.md
index 759d62419a8..67c8108fe68 100644
--- a/templates/app/iis_agent/README.md
+++ b/templates/app/iis_agent/README.md
@@ -43,7 +43,7 @@ No specific Zabbix configuration is required.
|{$IIS.PORT} |<p>Listening port.</p> |`80` |
|{$IIS.QUEUE.MAX.TIME} |<p>The time during which the queue length may exceed the threshold.</p> |`5m` |
|{$IIS.QUEUE.MAX.WARN} |<p>Maximum application pool's request queue length for trigger expression.</p> |`` |
-|{$IIS.SERVICE} |<p>The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information.</p> |`http` |
+|{$IIS.SERVICE} |<p>The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information: https://www.zabbix.com/documentation/5.4/manual/config/items/itemtypes/simple_checks</p> |`http` |
## Template links
diff --git a/templates/app/iis_agent/template_app_iis_agent.yaml b/templates/app/iis_agent/template_app_iis_agent.yaml
index 364202e5ae9..77ad7ce124a 100644
--- a/templates/app/iis_agent/template_app_iis_agent.yaml
+++ b/templates/app/iis_agent/template_app_iis_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-12-09T12:17:20Z'
+ date: '2021-12-19T15:19:41Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -766,7 +766,7 @@ zabbix_export:
-
macro: '{$IIS.SERVICE}'
value: http
- description: 'The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information.'
+ description: 'The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information: https://www.zabbix.com/documentation/5.4/manual/config/items/itemtypes/simple_checks'
valuemaps:
-
uuid: 63d1ad6b4cf643da870b14c53da6c52f
diff --git a/templates/app/iis_agent_active/README.md b/templates/app/iis_agent_active/README.md
index 90d11731732..cdc3060486d 100644
--- a/templates/app/iis_agent_active/README.md
+++ b/templates/app/iis_agent_active/README.md
@@ -43,7 +43,7 @@ No specific Zabbix configuration is required.
|{$IIS.PORT} |<p>Listening port.</p> |`80` |
|{$IIS.QUEUE.MAX.TIME} |<p>The time during which the queue length may exceed the threshold.</p> |`5m` |
|{$IIS.QUEUE.MAX.WARN} |<p>Maximum application pool's request queue length for trigger expression.</p> |`` |
-|{$IIS.SERVICE} |<p>The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information.</p> |`http` |
+|{$IIS.SERVICE} |<p>The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information: https://www.zabbix.com/documentation/5.4/manual/config/items/itemtypes/simple_checks</p> |`http` |
## Template links
diff --git a/templates/app/iis_agent_active/template_app_iis_agent_active.yaml b/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
index 5f65f2fcd36..7d2a99d0fd3 100644
--- a/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
+++ b/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-12-09T12:17:21Z'
+ date: '2021-12-19T15:19:42Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -807,7 +807,7 @@ zabbix_export:
-
macro: '{$IIS.SERVICE}'
value: http
- description: 'The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information.'
+ description: 'The service (http/https/etc) for port check. See "net.tcp.service" documentation page for more information: https://www.zabbix.com/documentation/5.4/manual/config/items/itemtypes/simple_checks'
valuemaps:
-
uuid: 636c6915a8154b6ea1ea5b00f015c149
diff --git a/templates/app/jenkins/README.md b/templates/app/jenkins/README.md
index 7733db22955..ae0c274a06f 100644
--- a/templates/app/jenkins/README.md
+++ b/templates/app/jenkins/README.md
@@ -17,11 +17,11 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/http) for basic instructions.
-Metrics are collected by requests to [Metrics API](https://plugins.jenkins.io/metrics/).
+Metrics are collected by requests to [Metrics API](https://plugins.jenkins.io/metrics/).
For common metrics:
- Install and configure Metrics plugin parameters according [official documentations](https://plugins.jenkins.io/metrics/). Do not forget to configure access to the Metrics Servlet by issuing API key and change macro {$JENKINS.API.KEY}.
+ Install and configure Metrics plugin parameters according [official documentations](https://plugins.jenkins.io/metrics/). Do not forget to configure access to the Metrics Servlet by issuing API key and change macro {$JENKINS.API.KEY}.
-For monitoring computers and builds:
+For monitoring computers and builds:
Create API token for monitoring user according [official documentations](https://www.jenkins.io/doc/book/system-administration/authenticating-scripted-clients/) and change macro {$JENKINS.USER}, {$JENKINS.API.TOKEN}.
Don't forget to change macros {$JENKINS.URL}.
@@ -50,8 +50,8 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Jobs discovery | |HTTP_AGENT |jenkins.jobs<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[*]`</p> |
-|Computers discovery | |HTTP_AGENT |jenkins.computers<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[*]`</p> |
+|Jobs discovery |<p>-</p> |HTTP_AGENT |jenkins.jobs<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[*]`</p> |
+|Computers discovery |<p>-</p> |HTTP_AGENT |jenkins.computers<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[*]`</p> |
## Items collected
@@ -61,10 +61,10 @@ There are no template links in this template.
|Jenkins |Jenkins: Temporary space check message |<p>The message will reference the first node which fails this check. There may be other nodes that fail the check, but this health check is designed to fail fast.</p> |DEPENDENT |jenkins.temporary_space.message<p>**Preprocessing**:</p><p>- JSONPATH: `$['temporary-space'].message`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Jenkins |Jenkins: Plugins check message |<p>The message of plugins health check.</p> |DEPENDENT |jenkins.plugins.message<p>**Preprocessing**:</p><p>- JSONPATH: `$['plugins'].message`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Jenkins |Jenkins: Thread deadlock check message |<p>The message of thread deadlock health check.</p> |DEPENDENT |jenkins.thread_deadlock.message<p>**Preprocessing**:</p><p>- JSONPATH: `$['thread-deadlock'].message`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Jenkins |Jenkins: Disk space check |<p>Returns FAIL if any of the Jenkins disk space monitors are reporting the disk space as less than the configured threshold.</p> |DEPENDENT |jenkins.disk_space<p>**Preprocessing**:</p><p>- JSONPATH: `$['disk-space'].healthy`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Jenkins |Jenkins: Plugins check |<p>Returns FAIL if any of the Jenkins plugins failed to start.</p> |DEPENDENT |jenkins.plugins<p>**Preprocessing**:</p><p>- JSONPATH: `$.plugins.healthy`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Jenkins |Jenkins: Temporary space check |<p>Returns FAIL if any of the Jenkins temporary space monitors are reporting the temporary space as less than the configured threshold.</p> |DEPENDENT |jenkins.temporary_space<p>**Preprocessing**:</p><p>- JSONPATH: `$['temporary-space'].healthy`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Jenkins |Jenkins: Thread deadlock check |<p>Returns FAIL if there are any deadlocked threads in the Jenkins master JVM.</p> |DEPENDENT |jenkins.thread_deadlock<p>**Preprocessing**:</p><p>- JSONPATH: `$['thread-deadlock'].healthy`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Disk space check |<p>Returns FAIL if any of the Jenkins disk space monitors are reporting the disk space as less than the configured threshold.</p> |DEPENDENT |jenkins.disk_space<p>**Preprocessing**:</p><p>- JSONPATH: `$['disk-space'].healthy`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Plugins check |<p>Returns FAIL if any of the Jenkins plugins failed to start.</p> |DEPENDENT |jenkins.plugins<p>**Preprocessing**:</p><p>- JSONPATH: `$.plugins.healthy`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Temporary space check |<p>Returns FAIL if any of the Jenkins temporary space monitors are reporting the temporary space as less than the configured threshold.</p> |DEPENDENT |jenkins.temporary_space<p>**Preprocessing**:</p><p>- JSONPATH: `$['temporary-space'].healthy`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Thread deadlock check |<p>Returns FAIL if there are any deadlocked threads in the Jenkins master JVM.</p> |DEPENDENT |jenkins.thread_deadlock<p>**Preprocessing**:</p><p>- JSONPATH: `$['thread-deadlock'].healthy`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Jenkins |Jenkins: Executors count |<p>The number of executors available to Jenkins. This is corresponds to the sum of all the executors of all the on-line nodes.</p> |DEPENDENT |jenkins.executor.count<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.executor.count.value'].value`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Jenkins |Jenkins: Executors free |<p>The number of executors available to Jenkins that are not currently in use.</p> |DEPENDENT |jenkins.executor.free<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.executor.free.value'].value`</p> |
|Jenkins |Jenkins: Executors in use |<p>The number of executors available to Jenkins that are currently in use.</p> |DEPENDENT |jenkins.executor.in_use<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.executor.in-use.value'].value`</p> |
@@ -108,25 +108,25 @@ There are no template links in this template.
|Jenkins |Jenkins: Build queue, buildable |<p>The number of jobs that are in the Jenkins build queue and currently in the blocked state.</p> |DEPENDENT |jenkins.queue.buildable<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.queue.buildable.value'].value`</p> |
|Jenkins |Jenkins: Build queue, pending |<p>The number of jobs that are in the Jenkins build queue and currently in the blocked state.</p> |DEPENDENT |jenkins.queue.pending<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.queue.pending.value'].value`</p> |
|Jenkins |Jenkins: Build queue, stuck |<p>The number of jobs that are in the Jenkins build queue and currently in the blocked state.</p> |DEPENDENT |jenkins.queue.stuck<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.queue.stuck.value'].value`</p> |
-|Jenkins |Jenkins: HTTP active requests, rate |<p>The number of currently active requests against the Jenkins master Web UI.</p> |DEPENDENT |jenkins.http.active_requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.counters.['http.activeRequests'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 400, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/400 status code.</p> |DEPENDENT |jenkins.http.bad_request.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.badRequest'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 500, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/500 status code.</p> |DEPENDENT |jenkins.http.server_error.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.serverError'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 503, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/503 status code.</p> |DEPENDENT |jenkins.http.service_unavailable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.serviceUnavailable'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 200, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/200 status code.</p> |DEPENDENT |jenkins.http.ok.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.ok'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response other, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a non-informational status code that is not in the list: HTTP/200, HTTP/201, HTTP/204, HTTP/304, HTTP/400, HTTP/403, HTTP/404, HTTP/500, or HTTP/503.</p> |DEPENDENT |jenkins.http.other.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.other'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 201, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/201 status code.</p> |DEPENDENT |jenkins.http.created.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.created'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 204, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/204 status code.</p> |DEPENDENT |jenkins.http.no_content.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.noContent'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 404, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/404 status code.</p> |DEPENDENT |jenkins.http.not_found.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.notFound'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 304, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/304 status code.</p> |DEPENDENT |jenkins.http.not_modified.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.notModified'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP response 403, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/403 status code.</p> |DEPENDENT |jenkins.http.forbidden.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.forbidden'].count`</p><p>- CHANGE_PER_SECOND |
-|Jenkins |Jenkins: HTTP requests, rate |<p>The rate at which the Jenkins master Web UI is receiving requests.</p> |DEPENDENT |jenkins.http.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.timers.['http.requests'].count`</p><p>- CHANGE_PER_SECOND |
+|Jenkins |Jenkins: HTTP active requests, rate |<p>The number of currently active requests against the Jenkins master Web UI.</p> |DEPENDENT |jenkins.http.active_requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.counters.['http.activeRequests'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 400, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/400 status code.</p> |DEPENDENT |jenkins.http.bad_request.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.badRequest'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 500, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/500 status code.</p> |DEPENDENT |jenkins.http.server_error.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.serverError'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 503, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/503 status code.</p> |DEPENDENT |jenkins.http.service_unavailable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.serviceUnavailable'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 200, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/200 status code.</p> |DEPENDENT |jenkins.http.ok.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.ok'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response other, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a non-informational status code that is not in the list: HTTP/200, HTTP/201, HTTP/204, HTTP/304, HTTP/400, HTTP/403, HTTP/404, HTTP/500, or HTTP/503.</p> |DEPENDENT |jenkins.http.other.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.other'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 201, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/201 status code.</p> |DEPENDENT |jenkins.http.created.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.created'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 204, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/204 status code.</p> |DEPENDENT |jenkins.http.no_content.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.noContent'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 404, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/404 status code.</p> |DEPENDENT |jenkins.http.not_found.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.notFound'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 304, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/304 status code.</p> |DEPENDENT |jenkins.http.not_modified.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.notModified'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP response 403, rate |<p>The rate at which the Jenkins master Web UI is responding to requests with a HTTP/403 status code.</p> |DEPENDENT |jenkins.http.forbidden.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.meters.['http.responseCodes.forbidden'].count`</p><p>- CHANGE_PER_SECOND</p> |
+|Jenkins |Jenkins: HTTP requests, rate |<p>The rate at which the Jenkins master Web UI is receiving requests.</p> |DEPENDENT |jenkins.http.requests.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.timers.['http.requests'].count`</p><p>- CHANGE_PER_SECOND</p> |
|Jenkins |Jenkins: HTTP requests, p95 |<p>The time spent generating the corresponding responses.</p> |DEPENDENT |jenkins.http.requests_p95.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.timers.['http.requests'].p95`</p> |
|Jenkins |Jenkins: HTTP requests, median |<p>The time spent generating the corresponding responses.</p> |DEPENDENT |jenkins.http.requests_p50.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.timers.['http.requests'].p50`</p> |
|Jenkins |Jenkins: Version |<p>Version of Jenkins server.</p> |DEPENDENT |jenkins.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['jenkins.versions.core'].value`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Jenkins |Jenkins: CPU Load |<p>The system load on the Jenkins master as reported by the JVM’s Operating System JMX bean. The calculation of system load is operating system dependent. Typically this is the sum of the number of processes that are currently running plus the number that are waiting to run. This is typically comparable against the number of CPU cores.</p> |DEPENDENT |jenkins.system.cpu.load<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['system.cpu.load'].value`</p> |
+|Jenkins |Jenkins: CPU Load |<p>The system load on the Jenkins master as reported by the JVM's Operating System JMX bean. The calculation of system load is operating system dependent. Typically this is the sum of the number of processes that are currently running plus the number that are waiting to run. This is typically comparable against the number of CPU cores.</p> |DEPENDENT |jenkins.system.cpu.load<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['system.cpu.load'].value`</p> |
|Jenkins |Jenkins: Uptime |<p>The number of seconds since the Jenkins master JVM started.</p> |DEPENDENT |jenkins.system.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['vm.uptime.milliseconds'].value`</p><p>- MULTIPLIER: `0.001`</p> |
|Jenkins |Jenkins: File descriptor ratio |<p>The ratio of used to total file descriptors</p> |DEPENDENT |jenkins.descriptor.ratio<p>**Preprocessing**:</p><p>- JSONPATH: `$.gauges.['vm.file.descriptor.ratio'].value`</p><p>- MULTIPLIER: `100`</p> |
-|Jenkins |Jenkins: Service ping | |HTTP_AGENT |jenkins.ping<p>**Preprocessing**:</p><p>- REGEX: `{$JENKINS.PING.REPLY}$ 1`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|Jenkins |Jenkins: Service ping | |HTTP_AGENT |jenkins.ping<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- REGEX: `{$JENKINS.PING.REPLY}$ 1`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Health score |<p>Represents health of project. A number between 0-100.</p><p>Job Description: {#DESCRIPTION}</p><p>Job Url: {#URL}</p> |DEPENDENT |jenkins.build.health[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].healthReport..score.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Build number |<p>Details: {#URL}/lastBuild/</p> |DEPENDENT |jenkins.last_build.number[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastBuild.number.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Build duration |<p>Build duration (in seconds).</p> |DEPENDENT |jenkins.last_build.duration[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastBuild.duration.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
@@ -134,15 +134,15 @@ There are no template links in this template.
|Jenkins |Jenkins job [{#NAME}]: Last Build result | |DEPENDENT |jenkins.last_build.result[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastBuild.result.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Failed Build number |<p>Details: {#URL}/lastFailedBuild/</p> |DEPENDENT |jenkins.last_failed_build.number[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastFailedBuild.number.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Failed Build duration |<p>Build duration (in seconds).</p> |DEPENDENT |jenkins.last_failed_build.duration[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastFailedBuild.duration.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-|Jenkins |Jenkins job [{#NAME}]: Last Failed Build timestamp | |DEPENDENT |jenkins.last_failed_build.timestamp[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastFailedBuild.timestamp.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|Jenkins |Jenkins job [{#NAME}]: Last Failed Build timestamp |<p>-</p> |DEPENDENT |jenkins.last_failed_build.timestamp[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastFailedBuild.timestamp.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Successful Build number |<p>Details: {#URL}/lastSuccessfulBuild/</p> |DEPENDENT |jenkins.last_successful_build.number[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastSuccessfulBuild.number.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins job [{#NAME}]: Last Successful Build duration |<p>Build duration (in seconds).</p> |DEPENDENT |jenkins.last_successful_build.duration[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastSuccessfulBuild.duration.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-|Jenkins |Jenkins job [{#NAME}]: Last Successful Build timestamp | |DEPENDENT |jenkins.last_successful_build.timestamp[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastSuccessfulBuild.timestamp.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|Jenkins |Jenkins job [{#NAME}]: Last Successful Build timestamp |<p>-</p> |DEPENDENT |jenkins.last_successful_build.timestamp[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.[?(@.name == "{#NAME}")].lastSuccessfulBuild.timestamp.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Executors |<p>The maximum number of concurrent builds that Jenkins may perform on this node.</p> |DEPENDENT |jenkins.computer.numExecutors[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].numExecutors.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: State |<p>Represents the actual online/offline state.</p><p>Node description: {#DESCRIPTION}</p> |DEPENDENT |jenkins.computer.state[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].offline.first()`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: State |<p>Represents the actual online/offline state.</p><p>Node description: {#DESCRIPTION}</p> |DEPENDENT |jenkins.computer.state[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].offline.first()`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Offline cause reason |<p>If the computer was offline (either temporarily or not), will return the cause as a string (without user info). Empty string if the system was put offline without given a cause.</p> |DEPENDENT |jenkins.computer.offline.reason[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].offlineCauseReason.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Idle |<p>Returns true if all the executors of this computer are idle.</p> |DEPENDENT |jenkins.computer.idle[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].idle.first()`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Temporarily offline |<p>Returns true if this node is marked temporarily offline.</p> |DEPENDENT |jenkins.computer.temp_offline[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].temporarilyOffline.first()`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Idle |<p>Returns true if all the executors of this computer are idle.</p> |DEPENDENT |jenkins.computer.idle[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].idle.first()`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Temporarily offline |<p>Returns true if this node is marked temporarily offline.</p> |DEPENDENT |jenkins.computer.temp_offline[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].temporarilyOffline.first()`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Available disk space |<p>The available disk space of $JENKINS_HOME on agent.</p> |DEPENDENT |jenkins.computer.disk_space[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.DiskSpaceMonitor'].size.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Available temp space |<p>The available disk space of the temporary directory. Java tools and tests/builds often create files in the temporary directory, and may not function properly if there's no available space.</p> |DEPENDENT |jenkins.computer.temp_space[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.TemporarySpaceMonitor'].size.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Response time average |<p>The round trip network response time from the master to the agent</p> |DEPENDENT |jenkins.computer.response_time[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.ResponseTimeMonitor'].average.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p> |
@@ -151,27 +151,27 @@ There are no template links in this template.
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Total physical memory |<p>Total physical memory of the system, in bytes.</p> |DEPENDENT |jenkins.computer.total_physical_memory[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.SwapSpaceMonitor'].totalPhysicalMemory.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Total swap space |<p>Total number of swap space in bytes.</p> |DEPENDENT |jenkins.computer.total_swap_space[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.SwapSpaceMonitor'].totalSwapSpace.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Jenkins |Jenkins: Computer [{#DISPLAY_NAME}]: Clock difference |<p>The clock difference between the master and nodes.</p> |DEPENDENT |jenkins.computer.clock_difference[{#DISPLAY_NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.computer.[?(@.displayName == "{#DISPLAY_NAME}")].monitorData['hudson.node_monitors.ClockMonitor'].diff.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `0.001`</p> |
-|Zabbix_raw_items |Jenkins: Get service metrics |<p>-</p> |HTTP_AGENT |jenkins.get_metrics |
-|Zabbix_raw_items |Jenkins: Get healthcheck | |HTTP_AGENT |jenkins.healthcheck |
-|Zabbix_raw_items |Jenkins: Get jobs info |<p>-</p> |HTTP_AGENT |jenkins.job_info |
-|Zabbix_raw_items |Jenkins: Get computer info |<p>-</p> |HTTP_AGENT |jenkins.computer_info |
+|Zabbix_raw_items |Jenkins: Get service metrics |<p>-</p> |HTTP_AGENT |jenkins.get_metrics<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Zabbix_raw_items |Jenkins: Get healthcheck | |HTTP_AGENT |jenkins.healthcheck<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Zabbix_raw_items |Jenkins: Get jobs info |<p>-</p> |HTTP_AGENT |jenkins.job_info<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Zabbix_raw_items |Jenkins: Get computer info |<p>-</p> |HTTP_AGENT |jenkins.computer_info<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Jenkins: Disk space is too low |<p>Jenkins disk space monitors are reporting the disk space as less than the configured threshold. The message will reference the first node which fails this check.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`{TEMPLATE_NAME:jenkins.disk_space.last()}=0 and {Jenkins by HTTP:jenkins.disk_space.message.strlen()}>0` |WARNING | |
-|Jenkins: One or more Jenkins plugins failed to start |<p>A failure is typically indicative of a potential issue within the Jenkins installation that will either be solved by explicitly disabling the failing plugin(s) or by resolving the corresponding plugin dependency issues. </p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`{TEMPLATE_NAME:jenkins.plugins.last()}=0 and {Jenkins by HTTP:jenkins.plugins.message.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Jenkins: Temporary space is too low |<p>Jenkins temporary space monitors are reporting the temporary space as less than the configured threshold. The message will reference the first node which fails this check.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`{TEMPLATE_NAME:jenkins.temporary_space.last()}=0 and {Jenkins by HTTP:jenkins.temporary_space.message.strlen()}>0` |WARNING | |
-|Jenkins: There are deadlocked threads in Jenkins master JVM |<p>There are any deadlocked threads in the Jenkins master JVM.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub('(.*)',\1)}</p> |`{TEMPLATE_NAME:jenkins.thread_deadlock.last()}=0 and {Jenkins by HTTP:jenkins.thread_deadlock.message.strlen()}>0` |WARNING | |
-|Jenkins: Service has no online nodes |<p>-</p> |`{TEMPLATE_NAME:jenkins.node.online.last()}=0` |AVERAGE | |
-|Jenkins: Version has changed (new version: {ITEM.VALUE}) |<p>Jenkins version has changed. Ack to close.</p> |`{TEMPLATE_NAME:jenkins.version.diff()}=1 and {TEMPLATE_NAME:jenkins.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Jenkins: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:jenkins.system.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|Jenkins: Current number of used files is too high (over {$JENKINS.FILE_DESCRIPTORS.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:jenkins.descriptor.ratio.min(5m)}>{$JENKINS.FILE_DESCRIPTORS.MAX.WARN}` |WARNING | |
-|Jenkins: Service is down |<p>-</p> |`{TEMPLATE_NAME:jenkins.ping.last()}=0` |AVERAGE |<p>Manual close: YES</p> |
-|Jenkins job [{#NAME}]: Job is unhealthy |<p>-</p> |`{TEMPLATE_NAME:jenkins.build.health[{#NAME}].last()}<{$JENKINS.JOB.HEALTH.SCORE.MIN.WARN}` |WARNING |<p>Manual close: YES</p> |
-|Jenkins: Computer [{#DISPLAY_NAME}]: Node is down |<p>Node down with reason: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`{TEMPLATE_NAME:jenkins.computer.state[{#DISPLAY_NAME}].last()}=1 and {Jenkins by HTTP:jenkins.computer.offline.reason[{#DISPLAY_NAME}].strlen()}>0` |AVERAGE |<p>**Depends on**:</p><p>- Jenkins: Computer [{#DISPLAY_NAME}]: Node is temporarily offline</p> |
-|Jenkins: Computer [{#DISPLAY_NAME}]: Node is temporarily offline |<p>Node is temporarily Offline with reason: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`{TEMPLATE_NAME:jenkins.computer.temp_offline[{#DISPLAY_NAME}].last()}=1 and {Jenkins by HTTP:jenkins.computer.offline.reason[{#DISPLAY_NAME}].strlen()}>0` |INFO |<p>Manual close: YES</p> |
+|Jenkins: Disk space is too low |<p>Jenkins disk space monitors are reporting the disk space as less than the configured threshold. The message will reference the first node which fails this check.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`last(/Jenkins by HTTP/jenkins.disk_space)=0 and length(last(/Jenkins by HTTP/jenkins.disk_space.message))>0` |WARNING | |
+|Jenkins: One or more Jenkins plugins failed to start |<p>A failure is typically indicative of a potential issue within the Jenkins installation that will either be solved by explicitly disabling the failing plugin(s) or by resolving the corresponding plugin dependency issues.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`last(/Jenkins by HTTP/jenkins.plugins)=0 and length(last(/Jenkins by HTTP/jenkins.plugins.message))>0` |INFO |<p>Manual close: YES</p> |
+|Jenkins: Temporary space is too low |<p>Jenkins temporary space monitors are reporting the temporary space as less than the configured threshold. The message will reference the first node which fails this check.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`last(/Jenkins by HTTP/jenkins.temporary_space)=0 and length(last(/Jenkins by HTTP/jenkins.temporary_space.message))>0` |WARNING | |
+|Jenkins: There are deadlocked threads in Jenkins master JVM |<p>There are any deadlocked threads in the Jenkins master JVM.</p><p>Health check message: {{ITEM.LASTVALUE2}.regsub('(.*)',\1)}</p> |`last(/Jenkins by HTTP/jenkins.thread_deadlock)=0 and length(last(/Jenkins by HTTP/jenkins.thread_deadlock.message))>0` |WARNING | |
+|Jenkins: Service has no online nodes |<p>-</p> |`last(/Jenkins by HTTP/jenkins.node.online)=0` |AVERAGE | |
+|Jenkins: Version has changed (new version: {ITEM.VALUE}) |<p>Jenkins version has changed. Ack to close.</p> |`last(/Jenkins by HTTP/jenkins.version,#1)<>last(/Jenkins by HTTP/jenkins.version,#2) and length(last(/Jenkins by HTTP/jenkins.version))>0` |INFO |<p>Manual close: YES</p> |
+|Jenkins: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Jenkins by HTTP/jenkins.system.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Jenkins: Current number of used files is too high (over {$JENKINS.FILE_DESCRIPTORS.MAX.WARN}% for 5m) |<p>-</p> |`min(/Jenkins by HTTP/jenkins.descriptor.ratio,5m)>{$JENKINS.FILE_DESCRIPTORS.MAX.WARN}` |WARNING | |
+|Jenkins: Service is down |<p>-</p> |`last(/Jenkins by HTTP/jenkins.ping)=0` |AVERAGE |<p>Manual close: YES</p> |
+|Jenkins job [{#NAME}]: Job is unhealthy |<p>-</p> |`last(/Jenkins by HTTP/jenkins.build.health[{#NAME}])<{$JENKINS.JOB.HEALTH.SCORE.MIN.WARN}` |WARNING |<p>Manual close: YES</p> |
+|Jenkins: Computer [{#DISPLAY_NAME}]: Node is down |<p>Node down with reason: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`last(/Jenkins by HTTP/jenkins.computer.state[{#DISPLAY_NAME}])=1 and length(last(/Jenkins by HTTP/jenkins.computer.offline.reason[{#DISPLAY_NAME}]))>0` |AVERAGE |<p>**Depends on**:</p><p>- Jenkins: Computer [{#DISPLAY_NAME}]: Node is temporarily offline</p><p>- Jenkins: Service has no online nodes</p> |
+|Jenkins: Computer [{#DISPLAY_NAME}]: Node is temporarily offline |<p>Node is temporarily offline with reason: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}</p> |`last(/Jenkins by HTTP/jenkins.computer.temp_offline[{#DISPLAY_NAME}])=1 and length(last(/Jenkins by HTTP/jenkins.computer.offline.reason[{#DISPLAY_NAME}]))>0` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/jenkins/template_app_jenkins.yaml b/templates/app/jenkins/template_app_jenkins.yaml
index b8e47e6e5d2..c7270914589 100644
--- a/templates/app/jenkins/template_app_jenkins.yaml
+++ b/templates/app/jenkins/template_app_jenkins.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:25Z'
+ date: '2021-12-19T15:19:42Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -204,7 +204,6 @@ zabbix_export:
parameters:
- ''
url: '{$JENKINS.URL}/metrics/{$JENKINS.API.KEY}/metrics'
- status_codes: ''
tags:
-
tag: Application
@@ -223,7 +222,6 @@ zabbix_export:
parameters:
- ''
url: '{$JENKINS.URL}/metrics/{$JENKINS.API.KEY}/healthcheck'
- status_codes: ''
tags:
-
tag: Application
@@ -1253,7 +1251,6 @@ zabbix_export:
parameters:
- 30m
url: '{$JENKINS.URL}/metrics/{$JENKINS.API.KEY}/ping'
- status_codes: ''
tags:
-
tag: Application
@@ -1538,7 +1535,7 @@ zabbix_export:
delay: '0'
history: 7d
value_type: FLOAT
- description: 'The system load on the Jenkins master as reported by the JVM’s Operating System JMX bean. The calculation of system load is operating system dependent. Typically this is the sum of the number of processes that are currently running plus the number that are waiting to run. This is typically comparable against the number of CPU cores.'
+ description: 'The system load on the Jenkins master as reported by the JVM''s Operating System JMX bean. The calculation of system load is operating system dependent. Typically this is the sum of the number of processes that are currently running plus the number that are waiting to run. This is typically comparable against the number of CPU cores.'
preprocessing:
-
type: JSONPATH
@@ -2530,7 +2527,7 @@ zabbix_export:
name: 'Jenkins: One or more Jenkins plugins failed to start'
priority: INFO
description: |
- A failure is typically indicative of a potential issue within the Jenkins installation that will either be solved by explicitly disabling the failing plugin(s) or by resolving the corresponding plugin dependency issues.
+ A failure is typically indicative of a potential issue within the Jenkins installation that will either be solved by explicitly disabling the failing plugin(s) or by resolving the corresponding plugin dependency issues.
Health check message: {{ITEM.LASTVALUE2}.regsub("(.*)",\1)}
manual_close: 'YES'
-
diff --git a/templates/app/kafka_jmx/README.md b/templates/app/kafka_jmx/README.md
index e724a6d9a9a..10c2ec49653 100644
--- a/templates/app/kafka_jmx/README.md
+++ b/templates/app/kafka_jmx/README.md
@@ -10,7 +10,6 @@ Official JMX Template for Apache Kafka.
This template was tested on:
- Apache Kafka, version 2.6.0
-- Zabbix, version 5.0, 5.2
## Setup
@@ -28,14 +27,14 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|--------------------------------------------|-----------------------------------------------------------------------------------|----------------------|
-| {$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN} | <p>The minimum Network processor average idle percent for trigger expression.</p> | `30` |
-| {$KAFKA.PASSWORD} | <p>-</p> | `zabbix` |
-| {$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN} | <p>The minimum Request handler average idle percent for trigger expression.</p> | `30` |
-| {$KAFKA.TOPIC.MATCHES} | <p>Filter of discoverable topics</p> | `.*` |
-| {$KAFKA.TOPIC.NOT_MATCHES} | <p>Filter to exclude discovered topics</p> | `__consumer_offsets` |
-| {$KAFKA.USER} | <p>-</p> | `zabbix` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN} |<p>The minimum Network processor average idle percent for trigger expression.</p> |`30` |
+|{$KAFKA.PASSWORD} |<p>-</p> |`zabbix` |
+|{$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN} |<p>The minimum Request handler average idle percent for trigger expression.</p> |`30` |
+|{$KAFKA.TOPIC.MATCHES} |<p>Filter of discoverable topics</p> |`.*` |
+|{$KAFKA.TOPIC.NOT_MATCHES} |<p>Filter to exclude discovered topics</p> |`__consumer_offsets` |
+|{$KAFKA.USER} |<p>-</p> |`zabbix` |
## Template links
@@ -43,99 +42,98 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|------------------------|-------------|------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Topic Metrics (write) | <p>-</p> | JMX | jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*"]<p>**Filter**:</p>AND <p>- A: {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- B: {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
-| Topic Metrics (read) | <p>-</p> | JMX | jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=*"]<p>**Filter**:</p>AND <p>- A: {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- B: {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
-| Topic Metrics (errors) | <p>-</p> | JMX | jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec,topic=*"]<p>**Filter**:</p>AND <p>- A: {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- B: {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Topic Metrics (write) |<p>-</p> |JMX |jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*"]<p>**Filter**:</p>AND <p>- {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
+|Topic Metrics (read) |<p>-</p> |JMX |jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=*"]<p>**Filter**:</p>AND <p>- {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
+|Topic Metrics (errors) |<p>-</p> |JMX |jmx.discovery[beans,"kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec,topic=*"]<p>**Filter**:</p>AND <p>- {#JMXTOPIC} MATCHES_REGEX `{$KAFKA.TOPIC.MATCHES}`</p><p>- {#JMXTOPIC} NOT_MATCHES_REGEX `{$KAFKA.TOPIC.NOT_MATCHES}`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|-------|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Kafka | Kafka: Leader election per second | <p>Number of leader elections per second.</p> | JMX | jmx["kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs","Count"] |
-| Kafka | Kafka: Unclean leader election per second | <p>Number of “unclean” elections per second.</p> | JMX | jmx["kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Controller state on broker | <p>One indicates that the broker is the controller for the cluster.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=ActiveControllerCount","Value"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Kafka | Kafka: Ineligible pending replica deletes | <p>The number of ineligible pending replica deletes.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount","Value"] |
-| Kafka | Kafka: Pending replica deletes | <p>The number of pending replica deletes.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=ReplicasToDeleteCount","Value"] |
-| Kafka | Kafka: Ineligible pending topic deletes | <p>The number of ineligible pending topic deletes.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount","Value"] |
-| Kafka | Kafka: Pending topic deletes | <p>The number of pending topic deletes.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=TopicsToDeleteCount","Value"] |
-| Kafka | Kafka: Offline log directory count | <p>The number of offline log directories (for example, after a hardware failure).</p> | JMX | jmx["kafka.log:type=LogManager,name=OfflineLogDirectoryCount","Value"] |
-| Kafka | Kafka: Offline partitions count | <p>Number of partitions that don't have an active leader.</p> | JMX | jmx["kafka.controller:type=KafkaController,name=OfflinePartitionsCount","Value"] |
-| Kafka | Kafka: Bytes out per second | <p>The rate at which data is fetched and read from the broker by consumers.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Bytes in per second | <p>The rate at which data sent from producers is consumed by the broker.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Messages in per second | <p>The rate at which individual messages are consumed by the broker.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Bytes rejected per second | <p>The rate at which bytes rejected per second by the broker.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Client fetch request failed per second | <p>Number of client fetch request failures per second.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Produce requests failed per second | <p>Number of failed produce requests per second.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Request handler average idle percent | <p>Indicates the percentage of time that the request handler (IO) threads are not in use.</p> | JMX | jmx["kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent","OneMinuteRate"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
-| Kafka | Kafka: Fetch-Consumer response send time, mean | <p>Average time taken, in milliseconds, to send the response.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","Mean"] |
-| Kafka | Kafka: Fetch-Consumer response send time, p95 | <p>The time taken, in milliseconds, to send the response for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","95thPercentile"] |
-| Kafka | Kafka: Fetch-Consumer response send time, p99 | <p>The time taken, in milliseconds, to send the response for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","99thPercentile"] |
-| Kafka | Kafka: Fetch-Follower response send time, mean | <p>Average time taken, in milliseconds, to send the response.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","Mean"] |
-| Kafka | Kafka: Fetch-Follower response send time, p95 | <p>The time taken, in milliseconds, to send the response for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","95thPercentile"] |
-| Kafka | Kafka: Fetch-Follower response send time, p99 | <p>The time taken, in milliseconds, to send the response for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","99thPercentile"] |
-| Kafka | Kafka: Produce response send time, mean | <p>Average time taken, in milliseconds, to send the response.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","Mean"] |
-| Kafka | Kafka: Produce response send time, p95 | <p>The time taken, in milliseconds, to send the response for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","95thPercentile"] |
-| Kafka | Kafka: Produce response send time, p99 | <p>The time taken, in milliseconds, to send the response for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","99thPercentile"] |
-| Kafka | Kafka: Fetch-Consumer request total time, mean | <p>Average time in ms to serve the Fetch-Consumer request.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","Mean"] |
-| Kafka | Kafka: Fetch-Consumer request total time, p95 | <p>Time in ms to serve the Fetch-Consumer request for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","95thPercentile"] |
-| Kafka | Kafka: Fetch-Consumer request total time, p99 | <p>Time in ms to serve the specified Fetch-Consumer for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","99thPercentile"] |
-| Kafka | Kafka: Fetch-Follower request total time, mean | <p>Average time in ms to serve the Fetch-Follower request.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","Mean"] |
-| Kafka | Kafka: Fetch-Follower request total time, p95 | <p>Time in ms to serve the Fetch-Follower request for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","95thPercentile"] |
-| Kafka | Kafka: Fetch-Follower request total time, p99 | <p>Time in ms to serve the Fetch-Follower request for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","99thPercentile"] |
-| Kafka | Kafka: Produce request total time, mean | <p>Average time in ms to serve the Produce request.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","Mean"] |
-| Kafka | Kafka: Produce request total time, p95 | <p>Time in ms to serve the Produce requests for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","95thPercentile"] |
-| Kafka | Kafka: Produce request total time, p99 | <p>Time in ms to serve the Produce requests for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","99thPercentile"] |
-| Kafka | Kafka: Fetch-Consumer request total time, mean | <p>Average time for a request to update metadata.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","Mean"] |
-| Kafka | Kafka: UpdateMetadata request total time, p95 | <p>Time for update metadata requests for 95th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","95thPercentile"] |
-| Kafka | Kafka: UpdateMetadata request total time, p99 | <p>Time for update metadata requests for 99th percentile.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","99thPercentile"] |
-| Kafka | Kafka: Temporary memory size in bytes (Fetch), max | <p>The maximum of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Max"] |
-| Kafka | Kafka: Temporary memory size in bytes (Fetch), avg | <p>The amount of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Mean"] |
-| Kafka | Kafka: Temporary memory size in bytes (Fetch), min | <p>The minimum of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Mean"] |
-| Kafka | Kafka: Temporary memory size in bytes (Produce), max | <p>The maximum of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Max"] |
-| Kafka | Kafka: Temporary memory size in bytes (Produce), avg | <p>The amount of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Mean"] |
-| Kafka | Kafka: Temporary memory size in bytes (Produce), min | <p>The minimum of temporary memory used for converting message formats and decompressing messages.</p> | JMX | jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Min"] |
-| Kafka | Kafka: Network processor average idle percent | <p>The average percentage of time that the network processors are idle.</p> | JMX | jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
-| Kafka | Kafka: Requests in producer purgatory | <p>Number of requests waiting in producer purgatory.</p> | JMX | jmx["kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Fetch","Value"] |
-| Kafka | Kafka: Requests in fetch purgatory | <p>Number of requests waiting in fetch purgatory.</p> | JMX | jmx["kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Produce","Value"] |
-| Kafka | Kafka: Replication maximum lag | <p>The maximum lag between the time that messages are received by the leader replica and by the follower replicas.</p> | JMX | jmx["kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica","Value"] |
-| Kafka | Kafka: Under minimum ISR partition count | <p>The number of partitions under the minimum In-Sync Replica (ISR) count.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=UnderMinIsrPartitionCount","Value"] |
-| Kafka | Kafka: Under replicated partitions | <p>The number of partitions that have not been fully replicated in the follower replicas (the number of non-reassigning replicas - the number of ISR > 0).</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions","Value"] |
-| Kafka | Kafka: ISR expands per second | <p>The rate at which the number of ISRs in the broker increases.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=IsrExpandsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: ISR shrink per second | <p>Rate of replicas leaving the ISR pool.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=IsrShrinksPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: Leader count | <p>The number of replicas for which this broker is the leader.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=LeaderCount","Value"] |
-| Kafka | Kafka: Partition count | <p>The number of partitions in the broker.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=PartitionCount","Value"] |
-| Kafka | Kafka: Number of reassigning partitions | <p>The number of reassigning leader partitions on a broker.</p> | JMX | jmx["kafka.server:type=ReplicaManager,name=ReassigningPartitions","Value"] |
-| Kafka | Kafka: Request queue size | <p>The size of the delay queue.</p> | JMX | jmx["kafka.server:type=Request","queue-size"] |
-| Kafka | Kafka: Version | <p>Current version of brocker.</p> | JMX | jmx["kafka.server:type=app-info","version"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Kafka | Kafka: Uptime | <p>Service uptime in seconds.</p> | JMX | jmx["kafka.server:type=app-info","start-time-ms"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return (Math.floor((Date.now()-Number(value))/1000))`</p> |
-| Kafka | Kafka: ZooKeeper client request latency | <p>Latency in milliseconds for ZooKeeper requests from broker.</p> | JMX | jmx["kafka.server:type=ZooKeeperClientMetrics,name=ZooKeeperRequestLatencyMs","Count"] |
-| Kafka | Kafka: ZooKeeper connection status | <p>Connection status of broker's ZooKeeper session.</p> | JMX | jmx["kafka.server:type=SessionExpireListener,name=SessionState","Value"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Kafka | Kafka: ZooKeeper disconnect rate | <p>ZooKeeper client disconnect per second.</p> | JMX | jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperDisconnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: ZooKeeper session expiration rate | <p>ZooKeeper client session expiration per second.</p> | JMX | jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperExpiresPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: ZooKeeper readonly rate | <p>ZooKeeper client readonly per second.</p> | JMX | jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperReadOnlyConnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka: ZooKeeper sync rate | <p>ZooKeeper client sync per second.</p> | JMX | jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperSyncConnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka {#JMXTOPIC}: Messages in per second | <p>The rate at which individual messages are consumed by topic.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka {#JMXTOPIC}: Bytes in per second | <p>The rate at which data sent from producers is consumed by topic.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka {#JMXTOPIC}: Bytes out per second | <p>The rate at which data is fetched and read from the broker by consumers (by topic).</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-| Kafka | Kafka {#JMXTOPIC}: Bytes rejected per second | <p>Rejected bytes rate by topic.</p> | JMX | jmx["kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Kafka |Kafka: Leader election per second |<p>Number of leader elections per second.</p> |JMX |jmx["kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs","Count"] |
+|Kafka |Kafka: Unclean leader election per second |<p>Number of “unclean” elections per second.</p> |JMX |jmx["kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Controller state on broker |<p>One indicates that the broker is the controller for the cluster.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=ActiveControllerCount","Value"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Kafka |Kafka: Ineligible pending replica deletes |<p>The number of ineligible pending replica deletes.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount","Value"] |
+|Kafka |Kafka: Pending replica deletes |<p>The number of pending replica deletes.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=ReplicasToDeleteCount","Value"] |
+|Kafka |Kafka: Ineligible pending topic deletes |<p>The number of ineligible pending topic deletes.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount","Value"] |
+|Kafka |Kafka: Pending topic deletes |<p>The number of pending topic deletes.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=TopicsToDeleteCount","Value"] |
+|Kafka |Kafka: Offline log directory count |<p>The number of offline log directories (for example, after a hardware failure).</p> |JMX |jmx["kafka.log:type=LogManager,name=OfflineLogDirectoryCount","Value"] |
+|Kafka |Kafka: Offline partitions count |<p>Number of partitions that don't have an active leader.</p> |JMX |jmx["kafka.controller:type=KafkaController,name=OfflinePartitionsCount","Value"] |
+|Kafka |Kafka: Bytes out per second |<p>The rate at which data is fetched and read from the broker by consumers.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Bytes in per second |<p>The rate at which data sent from producers is consumed by the broker.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Messages in per second |<p>The rate at which individual messages are consumed by the broker.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Bytes rejected per second |<p>The rate at which bytes are rejected per second by the broker.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Client fetch request failed per second |<p>Number of client fetch request failures per second.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=FailedFetchRequestsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Produce requests failed per second |<p>Number of failed produce requests per second.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=FailedProduceRequestsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Request handler average idle percent |<p>Indicates the percentage of time that the request handler (IO) threads are not in use.</p> |JMX |jmx["kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent","OneMinuteRate"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
+|Kafka |Kafka: Fetch-Consumer response send time, mean |<p>Average time taken, in milliseconds, to send the response.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","Mean"] |
+|Kafka |Kafka: Fetch-Consumer response send time, p95 |<p>The time taken, in milliseconds, to send the response for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","95thPercentile"] |
+|Kafka |Kafka: Fetch-Consumer response send time, p99 |<p>The time taken, in milliseconds, to send the response for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchConsumer","99thPercentile"] |
+|Kafka |Kafka: Fetch-Follower response send time, mean |<p>Average time taken, in milliseconds, to send the response.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","Mean"] |
+|Kafka |Kafka: Fetch-Follower response send time, p95 |<p>The time taken, in milliseconds, to send the response for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","95thPercentile"] |
+|Kafka |Kafka: Fetch-Follower response send time, p99 |<p>The time taken, in milliseconds, to send the response for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=FetchFollower","99thPercentile"] |
+|Kafka |Kafka: Produce response send time, mean |<p>Average time taken, in milliseconds, to send the response.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","Mean"] |
+|Kafka |Kafka: Produce response send time, p95 |<p>The time taken, in milliseconds, to send the response for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","95thPercentile"] |
+|Kafka |Kafka: Produce response send time, p99 |<p>The time taken, in milliseconds, to send the response for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=ResponseSendTimeMs,request=Produce","99thPercentile"] |
+|Kafka |Kafka: Fetch-Consumer request total time, mean |<p>Average time in ms to serve the Fetch-Consumer request.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","Mean"] |
+|Kafka |Kafka: Fetch-Consumer request total time, p95 |<p>Time in ms to serve the Fetch-Consumer request for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","95thPercentile"] |
+|Kafka |Kafka: Fetch-Consumer request total time, p99 |<p>Time in ms to serve the Fetch-Consumer request for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchConsumer","99thPercentile"] |
+|Kafka |Kafka: Fetch-Follower request total time, mean |<p>Average time in ms to serve the Fetch-Follower request.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","Mean"] |
+|Kafka |Kafka: Fetch-Follower request total time, p95 |<p>Time in ms to serve the Fetch-Follower request for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","95thPercentile"] |
+|Kafka |Kafka: Fetch-Follower request total time, p99 |<p>Time in ms to serve the Fetch-Follower request for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=FetchFollower","99thPercentile"] |
+|Kafka |Kafka: Produce request total time, mean |<p>Average time in ms to serve the Produce request.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","Mean"] |
+|Kafka |Kafka: Produce request total time, p95 |<p>Time in ms to serve the Produce requests for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","95thPercentile"] |
+|Kafka |Kafka: Produce request total time, p99 |<p>Time in ms to serve the Produce requests for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=Produce","99thPercentile"] |
+|Kafka |Kafka: UpdateMetadata request total time, mean |<p>Average time for a request to update metadata.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","Mean"] |
+|Kafka |Kafka: UpdateMetadata request total time, p95 |<p>Time for update metadata requests for 95th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","95thPercentile"] |
+|Kafka |Kafka: UpdateMetadata request total time, p99 |<p>Time for update metadata requests for 99th percentile.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TotalTimeMs,request=UpdateMetadata","99thPercentile"] |
+|Kafka |Kafka: Temporary memory size in bytes (Fetch), max |<p>The maximum of temporary memory used for converting message formats and decompressing messages.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Max"] |
+|Kafka |Kafka: Temporary memory size in bytes (Fetch), avg |<p>The amount of temporary memory used for converting message formats and decompressing messages.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Mean"] |
+|Kafka |Kafka: Temporary memory size in bytes (Produce), max |<p>The maximum of temporary memory used for converting message formats and decompressing messages.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Max"] |
+|Kafka |Kafka: Temporary memory size in bytes (Produce), avg |<p>The amount of temporary memory used for converting message formats and decompressing messages.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Mean"] |
+|Kafka |Kafka: Temporary memory size in bytes (Produce), min |<p>The minimum of temporary memory used for converting message formats and decompressing messages.</p> |JMX |jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Produce","Min"] |
+|Kafka |Kafka: Network processor average idle percent |<p>The average percentage of time that the network processors are idle.</p> |JMX |jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `100`</p> |
+|Kafka |Kafka: Requests in fetch purgatory |<p>Number of requests waiting in fetch purgatory.</p> |JMX |jmx["kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Fetch","Value"] |
+|Kafka |Kafka: Requests in producer purgatory |<p>Number of requests waiting in producer purgatory.</p> |JMX |jmx["kafka.server:type=DelayedOperationPurgatory,name=PurgatorySize,delayedOperation=Produce","Value"] |
+|Kafka |Kafka: Replication maximum lag |<p>The maximum lag between the time that messages are received by the leader replica and by the follower replicas.</p> |JMX |jmx["kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica","Value"] |
+|Kafka |Kafka: Under minimum ISR partition count |<p>The number of partitions under the minimum In-Sync Replica (ISR) count.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=UnderMinIsrPartitionCount","Value"] |
+|Kafka |Kafka: Under replicated partitions |<p>The number of partitions that have not been fully replicated in the follower replicas (the number of non-reassigning replicas - the number of ISR > 0).</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions","Value"] |
+|Kafka |Kafka: ISR expands per second |<p>The rate at which the number of ISRs in the broker increases.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=IsrExpandsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: ISR shrink per second |<p>Rate of replicas leaving the ISR pool.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=IsrShrinksPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: Leader count |<p>The number of replicas for which this broker is the leader.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=LeaderCount","Value"] |
+|Kafka |Kafka: Partition count |<p>The number of partitions in the broker.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=PartitionCount","Value"] |
+|Kafka |Kafka: Number of reassigning partitions |<p>The number of reassigning leader partitions on a broker.</p> |JMX |jmx["kafka.server:type=ReplicaManager,name=ReassigningPartitions","Value"] |
+|Kafka |Kafka: Request queue size |<p>The size of the delay queue.</p> |JMX |jmx["kafka.server:type=Request","queue-size"] |
+|Kafka |Kafka: Version |<p>Current version of the broker.</p> |JMX |jmx["kafka.server:type=app-info","version"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Kafka |Kafka: Uptime |<p>Service uptime in seconds.</p> |JMX |jmx["kafka.server:type=app-info","start-time-ms"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return (Math.floor((Date.now()-Number(value))/1000))`</p> |
+|Kafka |Kafka: ZooKeeper client request latency |<p>Latency in milliseconds for ZooKeeper requests from broker.</p> |JMX |jmx["kafka.server:type=ZooKeeperClientMetrics,name=ZooKeeperRequestLatencyMs","Count"] |
+|Kafka |Kafka: ZooKeeper connection status |<p>Connection status of broker's ZooKeeper session.</p> |JMX |jmx["kafka.server:type=SessionExpireListener,name=SessionState","Value"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Kafka |Kafka: ZooKeeper disconnect rate |<p>ZooKeeper client disconnect per second.</p> |JMX |jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperDisconnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: ZooKeeper session expiration rate |<p>ZooKeeper client session expiration per second.</p> |JMX |jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperExpiresPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: ZooKeeper readonly rate |<p>ZooKeeper client readonly per second.</p> |JMX |jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperReadOnlyConnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka: ZooKeeper sync rate |<p>ZooKeeper client sync per second.</p> |JMX |jmx["kafka.server:type=SessionExpireListener,name=ZooKeeperSyncConnectsPerSec","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka {#JMXTOPIC}: Messages in per second |<p>The rate at which individual messages are consumed by topic.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka {#JMXTOPIC}: Bytes in per second |<p>The rate at which data sent from producers is consumed by topic.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka {#JMXTOPIC}: Bytes out per second |<p>The rate at which data is fetched and read from the broker by consumers (by topic).</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Kafka |Kafka {#JMXTOPIC}: Bytes rejected per second |<p>Rejected bytes rate by topic.</p> |JMX |jmx["kafka.server:type=BrokerTopicMetrics,name=BytesRejectedPerSec,topic={#JMXTOPIC}","Count"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|----------------------------------|
-| Kafka: Unclean leader election detected | <p>Unclean leader elections occur when there is no qualified partition leader among Kafka brokers. If Kafka is configured to allow an unclean leader election, a leader is chosen from the out-of-sync replicas, and any messages that were not synced prior to the loss of the former leader are lost forever. Essentially, unclean leader elections sacrifice consistency for availability.</p> | `{TEMPLATE_NAME:jmx["kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec","Count"].last()}>0` | AVERAGE | |
-| Kafka: There are offline log directories | <p>The offline log directory count metric indicate the number of log directories which are offline (due to an hardware failure for example) so that the broker cannot store incoming messages anymore.</p> | `{TEMPLATE_NAME:jmx["kafka.log:type=LogManager,name=OfflineLogDirectoryCount","Value"].last()} > 0` | WARNING | |
-| Kafka: One or more partitions have no leader | <p>Any partition without an active leader will be completely inaccessible, and both consumers and producers of that partition will be blocked until a leader becomes available.</p> | `{TEMPLATE_NAME:jmx["kafka.controller:type=KafkaController,name=OfflinePartitionsCount","Value"].last()} > 0` | WARNING | |
-| Kafka: Request handler average idle percent is too low (under {$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN} for 15m) | <p>The request handler idle ratio metric indicates the percentage of time the request handlers are not in use. The lower this number, the more loaded the broker is.</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent","OneMinuteRate"].max(15m)}<{$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN}` | AVERAGE | |
-| Kafka: Network processor average idle percent is too low (under {$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN} for 15m) | <p>The network processor idle ratio metric indicates the percentage of time the network processor are not in use. The lower this number, the more loaded the broker is.</p> | `{TEMPLATE_NAME:jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"].max(15m)}<{$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN}` | AVERAGE | |
-| Kafka: Failed to fetch info data (or no data for 15m) | <p>Zabbix has not received data for items for the last 15 minutes</p> | `{TEMPLATE_NAME:jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"].nodata(15m)}=1` | WARNING | |
-| Kafka: There are partitions under the min ISR | <p>The Under min ISR partitions metric displays the number of partitions, where the number of In-Sync Replicas (ISR) is less than the minimum number of in-sync replicas specified. The two most common causes of under-min ISR partitions are that one or more brokers is unresponsive, or the cluster is experiencing performance issues and one or more brokers are falling behind.</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=ReplicaManager,name=UnderMinIsrPartitionCount","Value"].last()}>0` | AVERAGE | |
-| Kafka: There are under replicated partitions | <p>The Under replicated partitions metric displays the number of partitions that do not have enough replicas to meet the desired replication factor. A partition will also be considered under-replicated if the correct number of replicas exist, but one or more of the replicas have fallen significantly behind the partition leader. The two most common causes of under-replicated partitions are that one or more brokers is unresponsive, or the cluster is experiencing performance issues and one or more brokers have fallen behind.</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions","Value"].last()}>0` | AVERAGE | |
-| Kafka: Version has changed (new version: {ITEM.VALUE}) | <p>Kafka version has changed. Ack to close.</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=app-info","version"].diff()}=1 and {TEMPLATE_NAME:jmx["kafka.server:type=app-info","version"].strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Kafka: has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=app-info","start-time-ms"].last()}<10m` | INFO | <p>Manual close: YES</p> |
-| Kafka: Broker is not connected to ZooKeeper | <p>-</p> | `{TEMPLATE_NAME:jmx["kafka.server:type=SessionExpireListener,name=SessionState","Value"].regexp("CONNECTED")}=0` | AVERAGE | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Kafka: Unclean leader election detected |<p>Unclean leader elections occur when there is no qualified partition leader among Kafka brokers. If Kafka is configured to allow an unclean leader election, a leader is chosen from the out-of-sync replicas, and any messages that were not synced prior to the loss of the former leader are lost forever. Essentially, unclean leader elections sacrifice consistency for availability.</p> |`last(/Apache Kafka by JMX/jmx["kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec","Count"])>0` |AVERAGE | |
+|Kafka: There are offline log directories |<p>The offline log directory count metric indicates the number of log directories which are offline (due to a hardware failure, for example) so that the broker cannot store incoming messages anymore.</p> |`last(/Apache Kafka by JMX/jmx["kafka.log:type=LogManager,name=OfflineLogDirectoryCount","Value"]) > 0` |WARNING | |
+|Kafka: One or more partitions have no leader |<p>Any partition without an active leader will be completely inaccessible, and both consumers and producers of that partition will be blocked until a leader becomes available.</p> |`last(/Apache Kafka by JMX/jmx["kafka.controller:type=KafkaController,name=OfflinePartitionsCount","Value"]) > 0` |WARNING | |
+|Kafka: Request handler average idle percent is too low (under {$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN} for 15m) |<p>The request handler idle ratio metric indicates the percentage of time the request handlers are not in use. The lower this number, the more loaded the broker is.</p> |`max(/Apache Kafka by JMX/jmx["kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent","OneMinuteRate"],15m)<{$KAFKA.REQUEST_HANDLER_AVG_IDLE.MIN.WARN}` |AVERAGE | |
+|Kafka: Network processor average idle percent is too low (under {$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN} for 15m) |<p>The network processor idle ratio metric indicates the percentage of time the network processors are not in use. The lower this number, the more loaded the broker is.</p> |`max(/Apache Kafka by JMX/jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"],15m)<{$KAFKA.NET_PROC_AVG_IDLE.MIN.WARN}` |AVERAGE | |
+|Kafka: Failed to fetch info data (or no data for 15m) |<p>Zabbix has not received data for items for the last 15 minutes</p> |`nodata(/Apache Kafka by JMX/jmx["kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent","Value"],15m)=1` |WARNING | |
+|Kafka: There are partitions under the min ISR |<p>The Under min ISR partitions metric displays the number of partitions, where the number of In-Sync Replicas (ISR) is less than the minimum number of in-sync replicas specified. The two most common causes of under-min ISR partitions are that one or more brokers is unresponsive, or the cluster is experiencing performance issues and one or more brokers are falling behind.</p> |`last(/Apache Kafka by JMX/jmx["kafka.server:type=ReplicaManager,name=UnderMinIsrPartitionCount","Value"])>0` |AVERAGE | |
+|Kafka: There are under replicated partitions |<p>The Under replicated partitions metric displays the number of partitions that do not have enough replicas to meet the desired replication factor. A partition will also be considered under-replicated if the correct number of replicas exist, but one or more of the replicas have fallen significantly behind the partition leader. The two most common causes of under-replicated partitions are that one or more brokers is unresponsive, or the cluster is experiencing performance issues and one or more brokers have fallen behind.</p> |`last(/Apache Kafka by JMX/jmx["kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions","Value"])>0` |AVERAGE | |
+|Kafka: Version has changed (new version: {ITEM.VALUE}) |<p>Kafka version has changed. Ack to close.</p> |`last(/Apache Kafka by JMX/jmx["kafka.server:type=app-info","version"],#1)<>last(/Apache Kafka by JMX/jmx["kafka.server:type=app-info","version"],#2) and length(last(/Apache Kafka by JMX/jmx["kafka.server:type=app-info","version"]))>0` |INFO |<p>Manual close: YES</p> |
+|Kafka: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Apache Kafka by JMX/jmx["kafka.server:type=app-info","start-time-ms"])<10m` |INFO |<p>Manual close: YES</p> |
+|Kafka: Broker is not connected to ZooKeeper |<p>-</p> |`find(/Apache Kafka by JMX/jmx["kafka.server:type=SessionExpireListener,name=SessionState","Value"],,"regexp","CONNECTED")=0` |AVERAGE | |
## Feedback
diff --git a/templates/app/kafka_jmx/template_app_kafka_jmx.yaml b/templates/app/kafka_jmx/template_app_kafka_jmx.yaml
index 048bb5331e0..3c02b073db5 100644
--- a/templates/app/kafka_jmx/template_app_kafka_jmx.yaml
+++ b/templates/app/kafka_jmx/template_app_kafka_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:15Z'
+ date: '2021-12-19T15:19:43Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -18,7 +18,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1538,16 +1538,9 @@ zabbix_export:
key: 'jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Mean"]'
-
sortorder: '4'
- drawtype: BOLD_LINE
color: FC6EA3
item:
host: 'Apache Kafka by JMX'
- key: 'jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Mean"]'
- -
- sortorder: '5'
- color: 6C59DC
- item:
- host: 'Apache Kafka by JMX'
key: 'jmx["kafka.network:type=RequestMetrics,name=TemporaryMemoryBytes,request=Fetch","Max"]'
-
uuid: e5f69b35415b49168fba4ffa10e75f25
diff --git a/templates/app/memcached/README.md b/templates/app/memcached/README.md
index 358793eaaa1..de2b3283fcb 100644
--- a/templates/app/memcached/README.md
+++ b/templates/app/memcached/README.md
@@ -30,13 +30,13 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
-| {$MEMCACHED.CONN.PRC.MAX.WARN} | <p>Maximum percentage of connected clients</p> | `80` |
-| {$MEMCACHED.CONN.QUEUED.MAX.WARN} | <p>Maximum number of queued connections per second</p> | `1` |
-| {$MEMCACHED.CONN.THROTTLED.MAX.WARN} | <p>Maximum number of throttled connections per second</p> | `1` |
-| {$MEMCACHED.CONN.URI} | <p>Connection string in the URI format (password is not used). This param overwrites a value configured in the "Plugins.Memcached.Uri" option of the configuration file (if it's set), otherwise, the plugin's default value is used: "tcp://localhost:11211"</p> | `tcp://localhost:11211` |
-| {$MEMCACHED.MEM.PUSED.MAX.WARN} | <p>Maximum percentage of memory used</p> | `90` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$MEMCACHED.CONN.PRC.MAX.WARN} |<p>Maximum percentage of connected clients</p> |`80` |
+|{$MEMCACHED.CONN.QUEUED.MAX.WARN} |<p>Maximum number of queued connections per second</p> |`1` |
+|{$MEMCACHED.CONN.THROTTLED.MAX.WARN} |<p>Maximum number of throttled connections per second</p> |`1` |
+|{$MEMCACHED.CONN.URI} |<p>Connection string in the URI format (password is not used). This param overwrites a value configured in the "Plugins.Memcached.Uri" option of the configuration file (if it's set), otherwise, the plugin's default value is used: "tcp://localhost:11211"</p> |`tcp://localhost:11211` |
+|{$MEMCACHED.MEM.PUSED.MAX.WARN} |<p>Maximum percentage of memory used</p> |`90` |
## Template links
@@ -47,47 +47,47 @@ There are no template links in this template.
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|-------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------|
-| Memcached | Memcached: Ping | | ZABBIX_PASSIVE | memcached.ping["{$MEMCACHED.CONN.URI}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| Memcached | Memcached: Max connections | <p>Max number of concurrent connections</p> | DEPENDENT | memcached.connections.max<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_connections`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-| Memcached | Memcached: Maximum number of bytes | <p>Maximum number of bytes allowed in cache. You can adjust this setting via a config file or the command line while starting your Memcached server.</p> | DEPENDENT | memcached.config.limit_maxbytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.limit_maxbytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-| Memcached | Memcached: CPU sys | <p>System CPU consumed by the Memcached server</p> | DEPENDENT | memcached.cpu.sys<p>**Preprocessing**:</p><p>- JSONPATH: `$.rusage_system`</p> |
-| Memcached | Memcached: CPU user | <p>User CPU consumed by the Memcached server</p> | DEPENDENT | memcached.cpu.user<p>**Preprocessing**:</p><p>- JSONPATH: `$.rusage_user`</p> |
-| Memcached | Memcached: Queued connections per second | <p>Number of times that memcached has hit its connections limit and disabled its listener</p> | DEPENDENT | memcached.connections.queued.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.listen_disabled_num`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: New connections per second | <p>Number of connections opened per second</p> | DEPENDENT | memcached.connections.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_connections`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Throttled connections | <p>Number of times a client connection was throttled. When sending GETs in batch mode and the connection contains too many requests (limited by -R parameter) the connection might be throttled to prevent starvation.</p> | DEPENDENT | memcached.connections.throttled.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.conn_yields`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Connection structures | <p>Number of connection structures allocated by the server</p> | DEPENDENT | memcached.connections.structures<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_structures`</p> |
-| Memcached | Memcached: Open connections | <p>The number of clients presently connected</p> | DEPENDENT | memcached.connections.current<p>**Preprocessing**:</p><p>- JSONPATH: `$.curr_connections`</p> |
-| Memcached | Memcached: Commands: FLUSH per second | <p>The flush_all command invalidates all items in the database. This operation incurs a performance penalty and shouldn’t take place in production, so check your debug scripts.</p> | DEPENDENT | memcached.commands.flush.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_flush`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Commands: GET per second | <p>Number of GET requests received by server per second.</p> | DEPENDENT | memcached.commands.get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_get`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Commands: SET per second | <p>Number of SET requests received by server per second.</p> | DEPENDENT | memcached.commands.set.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_set`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Process id | <p>PID of the server process</p> | DEPENDENT | memcached.process_id<p>**Preprocessing**:</p><p>- JSONPATH: `$.pid`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| Memcached | Memcached: Memcached version | <p>Version of the Memcached server</p> | DEPENDENT | memcached.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| Memcached | Memcached: Uptime | <p>Number of seconds since Memcached server start</p> | DEPENDENT | memcached.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p> |
-| Memcached | Memcached: Bytes used | <p>Current number of bytes used to store items.</p> | DEPENDENT | memcached.stats.bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes`</p> |
-| Memcached | Memcached: Written bytes per second | <p>The network's read rate per second in B/sec</p> | DEPENDENT | memcached.stats.bytes_written.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_written`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Read bytes per second | <p>The network's read rate per second in B/sec</p> | DEPENDENT | memcached.stats.bytes_read.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_read`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Hits per second | <p>Number of successful GET requests (items requested and found) per second.</p> | DEPENDENT | memcached.stats.hits.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.get_hits`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Misses per second | <p>Number of missed GET requests (items requested but not found) per second.</p> | DEPENDENT | memcached.stats.misses.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.get_misses`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Evictions per second | <p>"An eviction is when an item that still has time to live is removed from the cache because a brand new item needs to be allocated.</p><p>The item is selected with a pseudo-LRU mechanism.</p><p>A high number of evictions coupled with a low hit rate means your application is setting a large number of keys that are never used again."</p> | DEPENDENT | memcached.stats.evictions.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.evictions`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: New items per second | <p>Number of new items stored per second.</p> | DEPENDENT | memcached.stats.total_items.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_items`</p><p>- CHANGE_PER_SECOND |
-| Memcached | Memcached: Current number of items stored | <p>Current number of items stored by this instance.</p> | DEPENDENT | memcached.stats.curr_items<p>**Preprocessing**:</p><p>- JSONPATH: `$.curr_items`</p> |
-| Memcached | Memcached: Threads | <p>Number of worker threads requested</p> | DEPENDENT | memcached.stats.threads<p>**Preprocessing**:</p><p>- JSONPATH: `$.threads`</p> |
-| Zabbix_raw_items | Memcached: Get status | | ZABBIX_PASSIVE | memcached.stats["{$MEMCACHED.CONN.URI}"] |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Memcached |Memcached: Ping | |ZABBIX_PASSIVE |memcached.ping["{$MEMCACHED.CONN.URI}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Memcached |Memcached: Max connections |<p>Max number of concurrent connections</p> |DEPENDENT |memcached.connections.max<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_connections`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|Memcached |Memcached: Maximum number of bytes |<p>Maximum number of bytes allowed in cache. You can adjust this setting via a config file or the command line while starting your Memcached server.</p> |DEPENDENT |memcached.config.limit_maxbytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.limit_maxbytes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
+|Memcached |Memcached: CPU sys |<p>System CPU consumed by the Memcached server</p> |DEPENDENT |memcached.cpu.sys<p>**Preprocessing**:</p><p>- JSONPATH: `$.rusage_system`</p> |
+|Memcached |Memcached: CPU user |<p>User CPU consumed by the Memcached server</p> |DEPENDENT |memcached.cpu.user<p>**Preprocessing**:</p><p>- JSONPATH: `$.rusage_user`</p> |
+|Memcached |Memcached: Queued connections per second |<p>Number of times that memcached has hit its connections limit and disabled its listener</p> |DEPENDENT |memcached.connections.queued.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.listen_disabled_num`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: New connections per second |<p>Number of connections opened per second</p> |DEPENDENT |memcached.connections.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_connections`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Throttled connections |<p>Number of times a client connection was throttled. When sending GETs in batch mode and the connection contains too many requests (limited by -R parameter) the connection might be throttled to prevent starvation.</p> |DEPENDENT |memcached.connections.throttled.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.conn_yields`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Connection structures |<p>Number of connection structures allocated by the server</p> |DEPENDENT |memcached.connections.structures<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_structures`</p> |
+|Memcached |Memcached: Open connections |<p>The number of clients presently connected</p> |DEPENDENT |memcached.connections.current<p>**Preprocessing**:</p><p>- JSONPATH: `$.curr_connections`</p> |
+|Memcached |Memcached: Commands: FLUSH per second |<p>The flush_all command invalidates all items in the database. This operation incurs a performance penalty and shouldn't take place in production, so check your debug scripts.</p> |DEPENDENT |memcached.commands.flush.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_flush`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Commands: GET per second |<p>Number of GET requests received by server per second.</p> |DEPENDENT |memcached.commands.get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_get`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Commands: SET per second |<p>Number of SET requests received by server per second.</p> |DEPENDENT |memcached.commands.set.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.cmd_set`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Process id |<p>PID of the server process</p> |DEPENDENT |memcached.process_id<p>**Preprocessing**:</p><p>- JSONPATH: `$.pid`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Memcached |Memcached: Memcached version |<p>Version of the Memcached server</p> |DEPENDENT |memcached.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Memcached |Memcached: Uptime |<p>Number of seconds since Memcached server start</p> |DEPENDENT |memcached.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p> |
+|Memcached |Memcached: Bytes used |<p>Current number of bytes used to store items.</p> |DEPENDENT |memcached.stats.bytes<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes`</p> |
+|Memcached |Memcached: Written bytes per second |<p>The network's write rate per second in B/sec</p> |DEPENDENT |memcached.stats.bytes_written.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_written`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Read bytes per second |<p>The network's read rate per second in B/sec</p> |DEPENDENT |memcached.stats.bytes_read.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_read`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Hits per second |<p>Number of successful GET requests (items requested and found) per second.</p> |DEPENDENT |memcached.stats.hits.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.get_hits`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Misses per second |<p>Number of missed GET requests (items requested but not found) per second.</p> |DEPENDENT |memcached.stats.misses.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.get_misses`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Evictions per second |<p>"An eviction is when an item that still has time to live is removed from the cache because a brand new item needs to be allocated.</p><p>The item is selected with a pseudo-LRU mechanism.</p><p>A high number of evictions coupled with a low hit rate means your application is setting a large number of keys that are never used again."</p> |DEPENDENT |memcached.stats.evictions.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.evictions`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: New items per second |<p>Number of new items stored per second.</p> |DEPENDENT |memcached.stats.total_items.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total_items`</p><p>- CHANGE_PER_SECOND</p> |
+|Memcached |Memcached: Current number of items stored |<p>Current number of items stored by this instance.</p> |DEPENDENT |memcached.stats.curr_items<p>**Preprocessing**:</p><p>- JSONPATH: `$.curr_items`</p> |
+|Memcached |Memcached: Threads |<p>Number of worker threads requested</p> |DEPENDENT |memcached.stats.threads<p>**Preprocessing**:</p><p>- JSONPATH: `$.threads`</p> |
+|Zabbix_raw_items |Memcached: Get status | |ZABBIX_PASSIVE |memcached.stats["{$MEMCACHED.CONN.URI}"] |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|----------|-----------------------------------------------------------------------------------|
-| Memcached: Service is down | <p>-</p> | `{TEMPLATE_NAME:memcached.ping["{$MEMCACHED.CONN.URI}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| Memcached: Failed to fetch info data (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes</p> | `{TEMPLATE_NAME:memcached.cpu.sys.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Memcached: Service is down</p> |
-| Memcached: Too many queued connections (over {$MEMCACHED.CONN.QUEUED.MAX.WARN} in 5m) | <p>The max number of connections is reachedand and a new connection had to wait in the queue as a result.</p> | `{TEMPLATE_NAME:memcached.connections.queued.rate.min(5m)}>{$MEMCACHED.CONN.QUEUED.MAX.WARN}` | WARNING | |
-| Memcached: Too many throttled connections (over {$MEMCACHED.CONN.THROTTLED.MAX.WARN} in 5m) | <p>Number of times a client connection was throttled is too high.</p><p>When sending GETs in batch mode and the connection contains too many requests (limited by -R parameter) the connection might be throttled to prevent starvation.</p> | `{TEMPLATE_NAME:memcached.connections.throttled.rate.min(5m)}>{$MEMCACHED.CONN.THROTTLED.MAX.WARN}` | WARNING | |
-| Memcached: Total number of connected clients is too high (over {$MEMCACHED.CONN.PRC.MAX.WARN}% in 5m) | <p>When the number of connections reaches the value of the "max_connections" parameter, new connections will be rejected.</p> | `{TEMPLATE_NAME:memcached.connections.current.min(5m)}/{Memcached:memcached.connections.max.last()}*100>{$MEMCACHED.CONN.PRC.MAX.WARN}` | WARNING | |
-| Memcached: Version has changed (new version: {ITEM.VALUE}) | <p>Memcached version has changed. Ack to close.</p> | `{TEMPLATE_NAME:memcached.version.diff()}=1 and {TEMPLATE_NAME:memcached.version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Memcached: has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:memcached.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| Memcached: Memory usage is too high (over {$MEMCACHED.MEM.PUSED.MAX.WARN} in 5m) | <p>-</p> | `{TEMPLATE_NAME:memcached.stats.bytes.min(5m)}/{Memcached:memcached.config.limit_maxbytes.last()}*100>{$MEMCACHED.MEM.PUSED.MAX.WARN}` | WARNING | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Memcached: Service is down |<p>-</p> |`last(/Memcached by Zabbix agent 2/memcached.ping["{$MEMCACHED.CONN.URI}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|Memcached: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/Memcached by Zabbix agent 2/memcached.cpu.sys,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Memcached: Service is down</p> |
+|Memcached: Too many queued connections (over {$MEMCACHED.CONN.QUEUED.MAX.WARN} in 5m) |<p>The max number of connections is reached and a new connection had to wait in the queue as a result.</p> |`min(/Memcached by Zabbix agent 2/memcached.connections.queued.rate,5m)>{$MEMCACHED.CONN.QUEUED.MAX.WARN}` |WARNING | |
+|Memcached: Too many throttled connections (over {$MEMCACHED.CONN.THROTTLED.MAX.WARN} in 5m) |<p>Number of times a client connection was throttled is too high.</p><p>When sending GETs in batch mode and the connection contains too many requests (limited by -R parameter) the connection might be throttled to prevent starvation.</p> |`min(/Memcached by Zabbix agent 2/memcached.connections.throttled.rate,5m)>{$MEMCACHED.CONN.THROTTLED.MAX.WARN}` |WARNING | |
+|Memcached: Total number of connected clients is too high (over {$MEMCACHED.CONN.PRC.MAX.WARN}% in 5m) |<p>When the number of connections reaches the value of the "max_connections" parameter, new connections will be rejected.</p> |`min(/Memcached by Zabbix agent 2/memcached.connections.current,5m)/last(/Memcached by Zabbix agent 2/memcached.connections.max)*100>{$MEMCACHED.CONN.PRC.MAX.WARN}` |WARNING | |
+|Memcached: Version has changed (new version: {ITEM.VALUE}) |<p>Memcached version has changed. Ack to close.</p> |`last(/Memcached by Zabbix agent 2/memcached.version,#1)<>last(/Memcached by Zabbix agent 2/memcached.version,#2) and length(last(/Memcached by Zabbix agent 2/memcached.version))>0` |INFO |<p>Manual close: YES</p> |
+|Memcached: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Memcached by Zabbix agent 2/memcached.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Memcached: Memory usage is too high (over {$MEMCACHED.MEM.PUSED.MAX.WARN} in 5m) |<p>-</p> |`min(/Memcached by Zabbix agent 2/memcached.stats.bytes,5m)/last(/Memcached by Zabbix agent 2/memcached.config.limit_maxbytes)*100>{$MEMCACHED.MEM.PUSED.MAX.WARN}` |WARNING | |
## Feedback
diff --git a/templates/app/memcached/template_app_memcached.yaml b/templates/app/memcached/template_app_memcached.yaml
index a3b2f22eb00..cb7e2a76590 100644
--- a/templates/app/memcached/template_app_memcached.yaml
+++ b/templates/app/memcached/template_app_memcached.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:04:14Z'
+ date: '2021-12-19T15:19:44Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -8,14 +8,14 @@ zabbix_export:
templates:
-
uuid: 05894ba2c9184d33992bf1bd21c347f6
- template: Memcached by Zabbix agent 2
- name: Memcached by Zabbix agent 2
+ template: 'Memcached by Zabbix agent 2'
+ name: 'Memcached by Zabbix agent 2'
description: |
Get Memcached metrics from plugin for the New Zabbix Agent (zabbix-agent2).
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/398623-discussion-thread-for-official-zabbix-template-memcached
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -28,7 +28,7 @@ zabbix_export:
delay: '0'
history: 7d
value_type: FLOAT
- description: 'The flush_all command invalidates all items in the database. This operation incurs a performance penalty and shouldn’t take place in production, so check your debug scripts.'
+ description: 'The flush_all command invalidates all items in the database. This operation incurs a performance penalty and shouldn''t take place in production, so check your debug scripts.'
preprocessing:
-
type: JSONPATH
@@ -603,7 +603,7 @@ zabbix_export:
triggers:
-
uuid: c53a2d7b861b41458976bfa03ab1a105
- expression: last(/Memcached by Zabbix agent 2/memcached.uptime)<10m
+ expression: 'last(/Memcached by Zabbix agent 2/memcached.uptime)<10m'
name: 'Memcached: has been restarted (uptime < 10m)'
priority: INFO
description: 'Uptime is less than 10 minutes'
diff --git a/templates/app/nginx_agent/README.md b/templates/app/nginx_agent/README.md
index d8dcfd0c834..78d563100b3 100644
--- a/templates/app/nginx_agent/README.md
+++ b/templates/app/nginx_agent/README.md
@@ -52,13 +52,13 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|---------------------------------|-----------------------------------------------------------------------------|----------------|
-| {$NGINX.DROP_RATE.MAX.WARN} | <p>The critical rate of the dropped connections for trigger expression.</p> | `1` |
-| {$NGINX.RESPONSE_TIME.MAX.WARN} | <p>The Nginx maximum response time in seconds for trigger expression.</p> | `10` |
-| {$NGINX.STUB_STATUS.HOST} | <p>Hostname or IP of Nginx stub_status host or container.</p> | `localhost` |
-| {$NGINX.STUB_STATUS.PATH} | <p>The path of Nginx stub_status page.</p> | `basic_status` |
-| {$NGINX.STUB_STATUS.PORT} | <p>The port of Nginx stub_status host or container.</p> | `80` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$NGINX.DROP_RATE.MAX.WARN} |<p>The critical rate of the dropped connections for trigger expression.</p> |`1` |
+|{$NGINX.RESPONSE_TIME.MAX.WARN} |<p>The Nginx maximum response time in seconds for trigger expression.</p> |`10` |
+|{$NGINX.STUB_STATUS.HOST} |<p>Hostname or IP of Nginx stub_status host or container.</p> |`localhost` |
+|{$NGINX.STUB_STATUS.PATH} |<p>The path of Nginx stub_status page.</p> |`basic_status` |
+|{$NGINX.STUB_STATUS.PORT} |<p>The port of Nginx stub_status host or container.</p> |`80` |
## Template links
@@ -69,36 +69,36 @@ There are no template links in this template.
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|----------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Nginx | Nginx: Service status | <p>-</p> | ZABBIX_PASSIVE | net.tcp.service[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| Nginx | Nginx: Service response time | <p>-</p> | ZABBIX_PASSIVE | net.tcp.service.perf[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"] |
-| Nginx | Nginx: Requests total | <p>The total number of client requests.</p> | DEPENDENT | nginx.requests.total<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p> |
-| Nginx | Nginx: Requests per second | <p>The total number of client requests.</p> | DEPENDENT | nginx.requests.total.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections accepted per second | <p>The total number of accepted client connections.</p> | DEPENDENT | nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \1`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections dropped per second | <p>The total number of dropped client connections.</p> | DEPENDENT | nginx.connections.dropped.rate<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections handled per second | <p>The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p> | DEPENDENT | nginx.connections.handled.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \2`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections active | <p>The current number of active client connections including Waiting connections.</p> | DEPENDENT | nginx.connections.active<p>**Preprocessing**:</p><p>- REGEX: `Active connections: ([0-9]+) \1`</p> |
-| Nginx | Nginx: Connections reading | <p>The current number of connections where nginx is reading the request header.</p> | DEPENDENT | nginx.connections.reading<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \1`</p> |
-| Nginx | Nginx: Connections waiting | <p>The current number of idle client connections waiting for a request.</p> | DEPENDENT | nginx.connections.waiting<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \3`</p> |
-| Nginx | Nginx: Connections writing | <p>The current number of connections where nginx is writing the response back to the client.</p> | DEPENDENT | nginx.connections.writing<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \2`</p> |
-| Nginx | Nginx: Number of processes running | <p>Number of the Nginx processes running.</p> | ZABBIX_PASSIVE | proc.num[nginx] |
-| Nginx | Nginx: Memory usage (vsize) | <p>Virtual memory size used by process in bytes.</p> | ZABBIX_PASSIVE | proc.mem[nginx,,,,vsize] |
-| Nginx | Nginx: Memory usage (rss) | <p>Resident set size memory used by process in bytes.</p> | ZABBIX_PASSIVE | proc.mem[nginx,,,,rss] |
-| Nginx | Nginx: CPU utilization | <p>Process CPU utilization percentage.</p> | ZABBIX_PASSIVE | proc.cpu.util[nginx] |
-| Nginx | Nginx: Version | <p>-</p> | DEPENDENT | nginx.version<p>**Preprocessing**:</p><p>- REGEX: `Server: nginx\/(.+(?<!\r)) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| Zabbix_raw_items | Nginx: Get stub status page | <p>The following status information is provided:</p><p>Active connections - the current number of active client connections including Waiting connections.</p><p>Accepts - the total number of accepted client connections.</p><p>Handled - the total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p><p>Requests - the total number of client requests.</p><p>Reading - the current number of connections where nginx is reading the request header.</p><p>Writing - the current number of connections where nginx is writing the response back to the client.</p><p>Waiting - the current number of idle client connections waiting for a request.</p><p>https://nginx.org/en/docs/http/ngx_http_stub_status_module.html</p> | ZABBIX_PASSIVE | web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"] |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Nginx |Nginx: Service status |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Nginx |Nginx: Service response time |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service.perf[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"] |
+|Nginx |Nginx: Requests total |<p>The total number of client requests.</p> |DEPENDENT |nginx.requests.total<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p> |
+|Nginx |Nginx: Requests per second |<p>The total number of client requests.</p> |DEPENDENT |nginx.requests.total.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections accepted per second |<p>The total number of accepted client connections.</p> |DEPENDENT |nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \1`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections dropped per second |<p>The total number of dropped client connections.</p> |DEPENDENT |nginx.connections.dropped.rate<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections handled per second |<p>The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p> |DEPENDENT |nginx.connections.handled.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \2`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections active |<p>The current number of active client connections including Waiting connections.</p> |DEPENDENT |nginx.connections.active<p>**Preprocessing**:</p><p>- REGEX: `Active connections: ([0-9]+) \1`</p> |
+|Nginx |Nginx: Connections reading |<p>The current number of connections where nginx is reading the request header.</p> |DEPENDENT |nginx.connections.reading<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \1`</p> |
+|Nginx |Nginx: Connections waiting |<p>The current number of idle client connections waiting for a request.</p> |DEPENDENT |nginx.connections.waiting<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \3`</p> |
+|Nginx |Nginx: Connections writing |<p>The current number of connections where nginx is writing the response back to the client.</p> |DEPENDENT |nginx.connections.writing<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \2`</p> |
+|Nginx |Nginx: Number of processes running |<p>Number of the Nginx processes running.</p> |ZABBIX_PASSIVE |proc.num[nginx] |
+|Nginx |Nginx: Memory usage (vsize) |<p>Virtual memory size used by process in bytes.</p> |ZABBIX_PASSIVE |proc.mem[nginx,,,,vsize] |
+|Nginx |Nginx: Memory usage (rss) |<p>Resident set size memory used by process in bytes.</p> |ZABBIX_PASSIVE |proc.mem[nginx,,,,rss] |
+|Nginx |Nginx: CPU utilization |<p>Process CPU utilization percentage.</p> |ZABBIX_PASSIVE |proc.cpu.util[nginx] |
+|Nginx |Nginx: Version |<p>-</p> |DEPENDENT |nginx.version<p>**Preprocessing**:</p><p>- REGEX: `Server: nginx\/(.+(?<!\r)) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix_raw_items |Nginx: Get stub status page |<p>The following status information is provided:</p><p>Active connections - the current number of active client connections including Waiting connections.</p><p>Accepts - the total number of accepted client connections.</p><p>Handled - the total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p><p>Requests - the total number of client requests.</p><p>Reading - the current number of connections where nginx is reading the request header.</p><p>Writing - the current number of connections where nginx is writing the response back to the client.</p><p>Waiting - the current number of idle client connections waiting for a request.</p><p>https://nginx.org/en/docs/http/ngx_http_stub_status_module.html</p> |ZABBIX_PASSIVE |web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"] |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-----------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------------------------------------------------------------------------------------------------------------|
-| Nginx: Service is down | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p> |
-| Nginx: Service response time is too high (over {$NGINX.RESPONSE_TIME.MAX.WARN}s for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"].min(5m)}>{$NGINX.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
-| Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) | <p>The dropping rate connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> | `{TEMPLATE_NAME:nginx.connections.dropped.rate.min(5m)} > {$NGINX.DROP_RATE.MAX.WARN}` | WARNING | <p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
-| Nginx: Process is not running | <p>-</p> | `{TEMPLATE_NAME:proc.num[nginx].last()}=0` | HIGH | |
-| Nginx: Version has changed (new version: {ITEM.VALUE}) | <p>Nginx version has changed. Ack to close.</p> | `{TEMPLATE_NAME:nginx.version.diff()}=1 and {TEMPLATE_NAME:nginx.version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Nginx: Failed to fetch stub status page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"].str("HTTP/1.1 200")}=0 or {TEMPLATE_NAME:web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"].nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Nginx: Service is down |<p>-</p> |`last(/Nginx by Zabbix agent/net.tcp.service[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p> |
+|Nginx: Service response time is too high (over {$NGINX.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/Nginx by Zabbix agent/net.tcp.service.perf[http,"{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PORT}"],5m)>{$NGINX.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
+|Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) |<p>The rate of dropped connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> |`min(/Nginx by Zabbix agent/nginx.connections.dropped.rate,5m) > {$NGINX.DROP_RATE.MAX.WARN}` |WARNING |<p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
+|Nginx: Process is not running |<p>-</p> |`last(/Nginx by Zabbix agent/proc.num[nginx])=0` |HIGH | |
+|Nginx: Version has changed (new version: {ITEM.VALUE}) |<p>Nginx version has changed. Ack to close.</p> |`last(/Nginx by Zabbix agent/nginx.version,#1)<>last(/Nginx by Zabbix agent/nginx.version,#2) and length(last(/Nginx by Zabbix agent/nginx.version))>0` |INFO |<p>Manual close: YES</p> |
+|Nginx: Failed to fetch stub status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`find(/Nginx by Zabbix agent/web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"],,"like","HTTP/1.1 200")=0 or nodata(/Nginx by Zabbix agent/web.page.get["{$NGINX.STUB_STATUS.HOST}","{$NGINX.STUB_STATUS.PATH}","{$NGINX.STUB_STATUS.PORT}"],30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Process is not running</p><p>- Nginx: Service is down</p> |
## Feedback
diff --git a/templates/app/nginx_agent/template_app_nginx_agent.yaml b/templates/app/nginx_agent/template_app_nginx_agent.yaml
index 7646ceded78..2c80d0d605d 100644
--- a/templates/app/nginx_agent/template_app_nginx_agent.yaml
+++ b/templates/app/nginx_agent/template_app_nginx_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:23Z'
+ date: '2021-12-19T15:19:45Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/384765-discussion-thread-for-official-zabbix-template-nginx
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/nginx_http/README.md b/templates/app/nginx_http/README.md
index f608495c2e6..c67d6dae3ca 100644
--- a/templates/app/nginx_http/README.md
+++ b/templates/app/nginx_http/README.md
@@ -10,10 +10,10 @@ Most of the metrics are collected in one go, thanks to Zabbix bulk data collecti
Template `Nginx by HTTP` collects metrics by polling [ngx_http_stub_status_module](https://nginx.ru/en/docs/http/ngx_http_stub_status_module.html) with HTTP agent remotely:
```text
-Active connections: 291
+Active connections: 291
server accepts handled requests
-16630948 16630948 31070465
-Reading: 6 Writing: 179 Waiting: 106
+16630948 16630948 31070465
+Reading: 6 Writing: 179 Waiting: 106
```
Note that this solution supports https and redirects.
@@ -30,7 +30,8 @@ This template was tested on:
Setup [ngx_http_stub_status_module](https://nginx.ru/en/docs/http/ngx_http_stub_status_module.html).
Test availability of http_stub_status module with `nginx -V 2>&1 | grep -o with-http_stub_status_module`.
-Example configuration of Nginx:
+Example configuration of Nginx:
+
```text
location = /basic_status {
stub_status;
@@ -48,13 +49,13 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|---------------------------------|-----------------------------------------------------------------------------|----------------|
-| {$NGINX.DROP_RATE.MAX.WARN} | <p>The critical rate of the dropped connections for trigger expression.</p> | `1` |
-| {$NGINX.RESPONSE_TIME.MAX.WARN} | <p>The Nginx maximum response time in seconds for trigger expression.</p> | `10` |
-| {$NGINX.STUB_STATUS.PATH} | <p>The path of Nginx stub_status page.</p> | `basic_status` |
-| {$NGINX.STUB_STATUS.PORT} | <p>The port of Nginx stub_status host or container.</p> | `80` |
-| {$NGINX.STUB_STATUS.SCHEME} | <p>The protocol http or https of Nginx stub_status host or container.</p> | `http` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$NGINX.DROP_RATE.MAX.WARN} |<p>The critical rate of the dropped connections for trigger expression.</p> |`1` |
+|{$NGINX.RESPONSE_TIME.MAX.WARN} |<p>The Nginx maximum response time in seconds for trigger expression.</p> |`10` |
+|{$NGINX.STUB_STATUS.PATH} |<p>The path of Nginx stub_status page.</p> |`basic_status` |
+|{$NGINX.STUB_STATUS.PORT} |<p>The port of Nginx stub_status host or container.</p> |`80` |
+|{$NGINX.STUB_STATUS.SCHEME} |<p>The protocol http or https of Nginx stub_status host or container.</p> |`http` |
## Template links
@@ -65,31 +66,31 @@ There are no template links in this template.
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|----------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Nginx | Nginx: Service status | <p>-</p> | SIMPLE | net.tcp.service[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| Nginx | Nginx: Service response time | <p>-</p> | SIMPLE | net.tcp.service.perf[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"] |
-| Nginx | Nginx: Requests total | <p>The total number of client requests.</p> | DEPENDENT | nginx.requests.total<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p> |
-| Nginx | Nginx: Requests per second | <p>The total number of client requests.</p> | DEPENDENT | nginx.requests.total.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections accepted per second | <p>The total number of accepted client connections.</p> | DEPENDENT | nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \1`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections dropped per second | <p>The total number of dropped client connections.</p> | DEPENDENT | nginx.connections.dropped.rate<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections handled per second | <p>The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p> | DEPENDENT | nginx.connections.handled.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \2`</p><p>- CHANGE_PER_SECOND |
-| Nginx | Nginx: Connections active | <p>The current number of active client connections including Waiting connections.</p> | DEPENDENT | nginx.connections.active<p>**Preprocessing**:</p><p>- REGEX: `Active connections: ([0-9]+) \1`</p> |
-| Nginx | Nginx: Connections reading | <p>The current number of connections where nginx is reading the request header.</p> | DEPENDENT | nginx.connections.reading<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \1`</p> |
-| Nginx | Nginx: Connections waiting | <p>The current number of idle client connections waiting for a request.</p> | DEPENDENT | nginx.connections.waiting<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \3`</p> |
-| Nginx | Nginx: Connections writing | <p>The current number of connections where nginx is writing the response back to the client.</p> | DEPENDENT | nginx.connections.writing<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \2`</p> |
-| Nginx | Nginx: Version | <p>-</p> | DEPENDENT | nginx.version<p>**Preprocessing**:</p><p>- REGEX: `Server: nginx\/(.+(?<!\r)) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| Zabbix_raw_items | Nginx: Get stub status page | <p>The following status information is provided:</p><p>Active connections - the current number of active client connections including Waiting connections.</p><p>Accepts - the total number of accepted client connections.</p><p>Handled - the total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p><p>Requests - the total number of client requests.</p><p>Reading - the current number of connections where nginx is reading the request header.</p><p>Writing - the current number of connections where nginx is writing the response back to the client.</p><p>Waiting - the current number of idle client connections waiting for a request.</p><p>https://nginx.org/en/docs/http/ngx_http_stub_status_module.html</p> | HTTP_AGENT | nginx.get_stub_status |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Nginx |Nginx: Service status |<p>-</p> |SIMPLE |net.tcp.service[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|Nginx |Nginx: Service response time |<p>-</p> |SIMPLE |net.tcp.service.perf[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"] |
+|Nginx |Nginx: Requests total |<p>The total number of client requests.</p> |DEPENDENT |nginx.requests.total<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p> |
+|Nginx |Nginx: Requests per second |<p>The total number of client requests.</p> |DEPENDENT |nginx.requests.total.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \3`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections accepted per second |<p>The total number of accepted client connections.</p> |DEPENDENT |nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \1`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections dropped per second |<p>The total number of dropped client connections.</p> |DEPENDENT |nginx.connections.dropped.rate<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections handled per second |<p>The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p> |DEPENDENT |nginx.connections.handled.rate<p>**Preprocessing**:</p><p>- REGEX: `server accepts handled requests\s+([0-9]+) ([0-9]+) ([0-9]+) \2`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Connections active |<p>The current number of active client connections including Waiting connections.</p> |DEPENDENT |nginx.connections.active<p>**Preprocessing**:</p><p>- REGEX: `Active connections: ([0-9]+) \1`</p> |
+|Nginx |Nginx: Connections reading |<p>The current number of connections where nginx is reading the request header.</p> |DEPENDENT |nginx.connections.reading<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \1`</p> |
+|Nginx |Nginx: Connections waiting |<p>The current number of idle client connections waiting for a request.</p> |DEPENDENT |nginx.connections.waiting<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \3`</p> |
+|Nginx |Nginx: Connections writing |<p>The current number of connections where nginx is writing the response back to the client.</p> |DEPENDENT |nginx.connections.writing<p>**Preprocessing**:</p><p>- REGEX: `Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+) \2`</p> |
+|Nginx |Nginx: Version |<p>-</p> |DEPENDENT |nginx.version<p>**Preprocessing**:</p><p>- REGEX: `Server: nginx\/(.+(?<!\r)) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix_raw_items |Nginx: Get stub status page |<p>The following status information is provided:</p><p>Active connections - the current number of active client connections including Waiting connections.</p><p>Accepts - the total number of accepted client connections.</p><p>Handled - the total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).</p><p>Requests - the total number of client requests.</p><p>Reading - the current number of connections where nginx is reading the request header.</p><p>Writing - the current number of connections where nginx is writing the response back to the client.</p><p>Waiting - the current number of idle client connections waiting for a request.</p><p>https://nginx.org/en/docs/http/ngx_http_stub_status_module.html</p> |HTTP_AGENT |nginx.get_stub_status |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|-----------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|----------|-------------------------------------------------------------------------------|
-| Nginx: Service is down | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| Nginx: Service response time is too high (over {$NGINX.RESPONSE_TIME.MAX.WARN}s for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"].min(5m)}>{$NGINX.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Service is down</p> |
-| Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) | <p>The dropping rate connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> | `{TEMPLATE_NAME:nginx.connections.dropped.rate.min(5m)} > {$NGINX.DROP_RATE.MAX.WARN}` | WARNING | <p>**Depends on**:</p><p>- Nginx: Service is down</p> |
-| Nginx: Version has changed (new version: {ITEM.VALUE}) | <p>Nginx version has changed. Ack to close.</p> | `{TEMPLATE_NAME:nginx.version.diff()}=1 and {TEMPLATE_NAME:nginx.version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Nginx: Failed to fetch stub status page (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:nginx.get_stub_status.str("HTTP/1.1 200")}=0 or {TEMPLATE_NAME:nginx.get_stub_status.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Service is down</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Nginx: Service is down |<p>-</p> |`last(/Nginx by HTTP/net.tcp.service[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|Nginx: Service response time is too high (over {$NGINX.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/Nginx by HTTP/net.tcp.service.perf[http,"{HOST.CONN}","{$NGINX.STUB_STATUS.PORT}"],5m)>{$NGINX.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Service is down</p> |
+|Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) |<p>The rate of dropped connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> |`min(/Nginx by HTTP/nginx.connections.dropped.rate,5m) > {$NGINX.DROP_RATE.MAX.WARN}` |WARNING |<p>**Depends on**:</p><p>- Nginx: Service is down</p> |
+|Nginx: Version has changed (new version: {ITEM.VALUE}) |<p>Nginx version has changed. Ack to close.</p> |`last(/Nginx by HTTP/nginx.version,#1)<>last(/Nginx by HTTP/nginx.version,#2) and length(last(/Nginx by HTTP/nginx.version))>0` |INFO |<p>Manual close: YES</p> |
+|Nginx: Failed to fetch stub status page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`find(/Nginx by HTTP/nginx.get_stub_status,,"like","HTTP/1.1 200")=0 or nodata(/Nginx by HTTP/nginx.get_stub_status,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- Nginx: Service is down</p> |
## Feedback
diff --git a/templates/app/nginx_http/template_app_nginx_http.yaml b/templates/app/nginx_http/template_app_nginx_http.yaml
index 3f37b838066..1e16e6f0099 100644
--- a/templates/app/nginx_http/template_app_nginx_http.yaml
+++ b/templates/app/nginx_http/template_app_nginx_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:26Z'
+ date: '2021-12-19T15:19:45Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/384765-discussion-thread-for-official-zabbix-template-nginx
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/nginx_plus_http/README.md b/templates/app/nginx_plus_http/README.md
index 96b55486efe..2e753813bee 100644
--- a/templates/app/nginx_plus_http/README.md
+++ b/templates/app/nginx_plus_http/README.md
@@ -4,10 +4,10 @@
## Overview
For Zabbix version: 5.4 and higher
-The template to monitor Nginx Plus by Zabbix that work without any external scripts.
+The template to monitor Nginx Plus by Zabbix that works without any external scripts.  
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-The live activity monitoring data is generated by the [NGINX Plus API](http://nginx.org/en/docs/http/ngx_http_api_module.html).
+The live activity monitoring data is generated by the [NGINX Plus API](http://nginx.org/en/docs/http/ngx_http_api_module.html).
@@ -74,14 +74,14 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|HTTP server zones discovery |<p>-</p> |DEPENDENT |nginx.http.server_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.ZONE.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.ZONE.NOT_MATCHES}`</p> |
-|HTTP location zones discovery |<p>-</p> |DEPENDENT |nginx.http.location_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.LOCATION.ZONE.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.LOCATION.ZONE.NOT_MATCHES}`</p> |
-|HTTP upstreams discovery |<p>-</p> |DEPENDENT |nginx.http.upstreams.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.NOT_MATCHES}`</p> |
-|HTTP upstream peers discovery |<p>-</p> |DEPENDENT |nginx.http.upstream.peers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#UPSTREAM} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.MATCHES}`</p><p>- B: {#UPSTREAM} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.NOT_MATCHES}`</p> |
-|Stream server zones discovery |<p>-</p> |DEPENDENT |nginx.stream.server_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.ZONE.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.ZONE.NOT_MATCHES}`</p> |
-|Stream upstreams discovery |<p>-</p> |DEPENDENT |nginx.stream.upstreams.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.NOT_MATCHES}`</p> |
-|Stream upstream peers discovery |<p>-</p> |DEPENDENT |nginx.stream.upstream.peers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#UPSTREAM} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.MATCHES}`</p><p>- B: {#UPSTREAM} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.NOT_MATCHES}`</p> |
-|Resolvers discovery |<p>-</p> |DEPENDENT |nginx.resolvers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- A: {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.RESOLVER.MATCHES}`</p><p>- B: {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.RESOLVER.NOT_MATCHES}`</p> |
+|HTTP server zones discovery |<p>-</p> |DEPENDENT |nginx.http.server_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.ZONE.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.ZONE.NOT_MATCHES}`</p> |
+|HTTP location zones discovery |<p>-</p> |DEPENDENT |nginx.http.location_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.LOCATION.ZONE.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.LOCATION.ZONE.NOT_MATCHES}`</p> |
+|HTTP upstreams discovery |<p>-</p> |DEPENDENT |nginx.http.upstreams.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.NOT_MATCHES}`</p> |
+|HTTP upstream peers discovery |<p>-</p> |DEPENDENT |nginx.http.upstream.peers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#UPSTREAM} MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.MATCHES}`</p><p>- {#UPSTREAM} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.HTTP.UPSTREAM.NOT_MATCHES}`</p> |
+|Stream server zones discovery |<p>-</p> |DEPENDENT |nginx.stream.server_zones.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.ZONE.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.ZONE.NOT_MATCHES}`</p> |
+|Stream upstreams discovery |<p>-</p> |DEPENDENT |nginx.stream.upstreams.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.NOT_MATCHES}`</p> |
+|Stream upstream peers discovery |<p>-</p> |DEPENDENT |nginx.stream.upstream.peers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#UPSTREAM} MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.MATCHES}`</p><p>- {#UPSTREAM} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.STREAM.UPSTREAM.NOT_MATCHES}`</p> |
+|Resolvers discovery |<p>-</p> |DEPENDENT |nginx.resolvers.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$NGINX.LLD.FILTER.RESOLVER.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$NGINX.LLD.FILTER.RESOLVER.NOT_MATCHES}`</p> |
## Items collected
@@ -92,51 +92,51 @@ There are no template links in this template.
|Nginx |Nginx: Address |<p>The address of the server that accepted status request.</p> |DEPENDENT |nginx.info.address<p>**Preprocessing**:</p><p>- JSONPATH: `$.address`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Nginx |Nginx: Generation |<p>The total number of configuration reloads.</p> |DEPENDENT |nginx.info.generation<p>**Preprocessing**:</p><p>- JSONPATH: `$.generation`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Nginx |Nginx: Uptime |<p>Server uptime.</p> |DEPENDENT |nginx.info.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.load_timestamp`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- JAVASCRIPT: `return Math.floor((Date.now() - new Date(value)) / 1000);`</p> |
-|Nginx |Nginx: Connections accepted, rate |<p>The total number of accepted client connections per second.</p> |DEPENDENT |nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.accepted`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: Connections accepted, rate |<p>The total number of accepted client connections per second.</p> |DEPENDENT |nginx.connections.accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.accepted`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: Connections dropped |<p>The total number of dropped client connections.</p> |DEPENDENT |nginx.connections.dropped<p>**Preprocessing**:</p><p>- JSONPATH: `$.dropped`</p> |
|Nginx |Nginx: Connections active |<p>The current number of active client connections.</p> |DEPENDENT |nginx.connections.active<p>**Preprocessing**:</p><p>- JSONPATH: `$.active`</p> |
|Nginx |Nginx: Connections idle |<p>The current number of idle client connections.</p> |DEPENDENT |nginx.connections.idle<p>**Preprocessing**:</p><p>- JSONPATH: `$.idle`</p> |
-|Nginx |Nginx: SSL handshakes, rate |<p>The total number of successful SSL handshakes per second.</p> |DEPENDENT |nginx.ssl.handshakes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.handshakes`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: SSL handshakes failed, rate |<p>The total number of failed SSL handshakes per second.</p> |DEPENDENT |nginx.ssl.handshakes_failed.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.handshakes_failed`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: SSL session reuses, rate |<p>The total number of session reuses during SSL handshake per second.</p> |DEPENDENT |nginx.ssl.session_reuses.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.session_reuses`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Requests total, rate |<p>The total number of client requests per second.</p> |DEPENDENT |nginx.requests.total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: SSL handshakes, rate |<p>The total number of successful SSL handshakes per second.</p> |DEPENDENT |nginx.ssl.handshakes.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.handshakes`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: SSL handshakes failed, rate |<p>The total number of failed SSL handshakes per second.</p> |DEPENDENT |nginx.ssl.handshakes_failed.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.handshakes_failed`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: SSL session reuses, rate |<p>The total number of session reuses during SSL handshake per second.</p> |DEPENDENT |nginx.ssl.session_reuses.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.session_reuses`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Requests total, rate |<p>The total number of client requests per second.</p> |DEPENDENT |nginx.requests.total.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.total`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: Requests current |<p>The current number of client requests.</p> |DEPENDENT |nginx.requests.current<p>**Preprocessing**:</p><p>- JSONPATH: `$.current`</p> |
|Nginx |Nginx: HTTP server zone [{#NAME}]: Processing |<p>The number of client requests that are currently being processed.</p> |DEPENDENT |nginx.http.server_zones.processing[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].processing`</p> |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Requests, rate |<p>The total number of client requests received from clients per second.</p> |DEPENDENT |nginx.http.server_zones.requests.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.1xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.1xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.2xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.3xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.3xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.4xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.5xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses total, rate |<p>The total number of responses sent to clients per second.</p> |DEPENDENT |nginx.http.server_zones.responses.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.total`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Discarded, rate |<p>The total number of requests completed without sending a response per second.</p> |DEPENDENT |nginx.http.server_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.http.server_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP server zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.http.server_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Requests, rate |<p>The total number of client requests received from clients per second.</p> |DEPENDENT |nginx.http.location_zones.requests.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.1xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.1xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.2xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.3xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.3xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.4xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.5xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses total, rate |<p>The total number of responses sent to clients per second.</p> |DEPENDENT |nginx.http.location_zones.responses.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.total`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Discarded, rate |<p>The total number of requests completed without sending a response per second.</p> |DEPENDENT |nginx.http.location_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.http.location_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP location zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.http.location_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Requests, rate |<p>The total number of client requests received from clients per second.</p> |DEPENDENT |nginx.http.server_zones.requests.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.1xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.1xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.2xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.3xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.3xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.4xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.server_zones.responses.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.5xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Responses total, rate |<p>The total number of responses sent to clients per second.</p> |DEPENDENT |nginx.http.server_zones.responses.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.total`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Discarded, rate |<p>The total number of requests completed without sending a response per second.</p> |DEPENDENT |nginx.http.server_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.http.server_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP server zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.http.server_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Requests, rate |<p>The total number of client requests received from clients per second.</p> |DEPENDENT |nginx.http.location_zones.requests.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.1xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.1xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.2xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.3xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.3xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.4xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.location_zones.responses.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.5xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Responses total, rate |<p>The total number of responses sent to clients per second.</p> |DEPENDENT |nginx.http.location_zones.responses.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.total`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Discarded, rate |<p>The total number of requests completed without sending a response per second.</p> |DEPENDENT |nginx.http.location_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.http.location_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP location zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.http.location_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: HTTP upstream [{#NAME}]: Keepalive |<p>The current number of idle keepalive connections.</p> |DEPENDENT |nginx.http.upstreams.keepalive[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].keepalive`</p> |
|Nginx |Nginx: HTTP upstream [{#NAME}]: Zombies |<p>The current number of servers removed from the group but still processing active client requests.</p> |DEPENDENT |nginx.http.upstreams.zombies[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].zombies`</p> |
-|Nginx |Nginx: HTTP upstream [{#NAME}]: Zone |<p>The name of the shared memory zone that keeps the group’s configuration and run-time state.</p> |DEPENDENT |nginx.http.upstreams.zone[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].zone`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|Nginx |Nginx: HTTP upstream [{#NAME}]: Zone |<p>The name of the shared memory zone that keeps the group's configuration and run-time state.</p> |DEPENDENT |nginx.http.upstreams.zone[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].zone`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: State |<p>Current state, which may be one of “up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”.</p> |DEPENDENT |nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].state.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Active |<p>The current number of active connections.</p> |DEPENDENT |nginx.http.upstream.peer.active[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].active.first()`</p> |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Requests, rate |<p>The total number of client requests forwarded to this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.requests.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].requests.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.1xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.1xx.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.2xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.2xx.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.3xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.3xx.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.4xx.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.5xx.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses total, rate |<p>The total number of responses obtained from this server.</p> |DEPENDENT |nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.total.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Sent, rate |<p>The total number of bytes sent to this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.sent.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].sent.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Received, rate |<p>The total number of bytes received from this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.received.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].received.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Fails, rate |<p>The total number of unsuccessful attempts to communicate with the server per second.</p> |DEPENDENT |nginx.http.upstream.peer.fails.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].fails.first()`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Requests, rate |<p>The total number of client requests forwarded to this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.requests.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].requests.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 1xx, rate |<p>The number of responses with “1xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.1xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.1xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 2xx, rate |<p>The number of responses with “2xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.2xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.2xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 3xx, rate |<p>The number of responses with “3xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.3xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.3xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 4xx, rate |<p>The number of responses with “4xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.4xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses 5xx, rate |<p>The number of responses with “5xx” status codes per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.5xx.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Responses total, rate |<p>The total number of responses obtained from this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].responses.total.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Sent, rate |<p>The total number of bytes sent to this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.sent.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].sent.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Received, rate |<p>The total number of bytes received from this server per second.</p> |DEPENDENT |nginx.http.upstream.peer.received.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].received.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Fails, rate |<p>The total number of unsuccessful attempts to communicate with the server per second.</p> |DEPENDENT |nginx.http.upstream.peer.fails.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].fails.first()`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Unavail |<p>How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.</p> |DEPENDENT |nginx.http.upstream.peer.unavail.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].unavail.first()`</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Header time |<p>The average time to get the response header from the server.</p> |DEPENDENT |nginx.http.upstream.peer.header_time.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].header_time.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Response time |<p>The average time to get the full response from the server.</p> |DEPENDENT |nginx.http.upstream.peer.response_time.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].response_time.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
@@ -144,21 +144,21 @@ There are no template links in this template.
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Health checks, fails |<p>The number of failed health checks.</p> |DEPENDENT |nginx.http.upstream.peer.health_checks.fails[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].health_checks.fails.first()`</p> |
|Nginx |Nginx: HTTP upstream [{#UPSTREAM}] peer [{#PEER}]: Health checks, unhealthy |<p>How many times the server became unhealthy (state “unhealthy”).</p> |DEPENDENT |nginx.http.upstream.peer.health_checks.unhealthy[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].health_checks.unhealthy.first()`</p> |
|Nginx |Nginx: Stream server zone [{#NAME}]: Processing |<p>The number of client connections that are currently being processed.</p> |DEPENDENT |nginx.stream.server_zones.processing[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].processing`</p> |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Connections, rate |<p>The total number of connections accepted from clients per second.</p> |DEPENDENT |nginx.stream.server_zones.connections.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].connections`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 2xx, rate |<p>The total number of sessions completed with status codes “2xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.2xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 4xx, rate |<p>The total number of sessions completed with status codes “4xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.4xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 5xx, rate |<p>The total number of sessions completed with status codes “5xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.5xx`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions total, rate |<p>The total number of completed client sessions per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.total`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Discarded, rate |<p>The total number of connections completed without creating a session per second.</p> |DEPENDENT |nginx.stream.server_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.stream.server_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream server zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.stream.server_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Connections, rate |<p>The total number of connections accepted from clients per second.</p> |DEPENDENT |nginx.stream.server_zones.connections.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].connections`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 2xx, rate |<p>The total number of sessions completed with status codes “2xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.2xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.2xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 4xx, rate |<p>The total number of sessions completed with status codes “4xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.4xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.4xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions 5xx, rate |<p>The total number of sessions completed with status codes “5xx” per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.5xx.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.5xx`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Sessions total, rate |<p>The total number of completed client sessions per second.</p> |DEPENDENT |nginx.stream.server_zones.sessions.total.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sessions.total`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Discarded, rate |<p>The total number of connections completed without creating a session per second.</p> |DEPENDENT |nginx.stream.server_zones.discarded.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].discarded`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Received, rate |<p>The total number of bytes received from clients per second.</p> |DEPENDENT |nginx.stream.server_zones.received.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].received`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream server zone [{#NAME}]: Sent, rate |<p>The total number of bytes sent to clients per second.</p> |DEPENDENT |nginx.stream.server_zones.sent.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].sent`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: Stream upstream [{#NAME}]: Zombies | |DEPENDENT |nginx.stream.upstreams.zombies[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].zombies`</p> |
|Nginx |Nginx: Stream upstream [{#NAME}]: Zone | |DEPENDENT |nginx.stream.upstreams.zone[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].zone`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: State |<p>Current state, which may be one of “up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”.</p> |DEPENDENT |nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].state.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Active |<p>The current number of connections.</p> |DEPENDENT |nginx.stream.upstream.peer.active[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].active.first()`</p> |
-|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Sent, rate |<p>The total number of bytes sent to this server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.sent.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].sent.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Received, rate |<p>The total number of bytes received from this server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.received.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].received.first()`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Fails, rate |<p>The total number of unsuccessful attempts to communicate with the server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.fails.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].fails.first()`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Sent, rate |<p>The total number of bytes sent to this server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.sent.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].sent.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Received, rate |<p>The total number of bytes received from this server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.received.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].received.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Fails, rate |<p>The total number of unsuccessful attempts to communicate with the server per second.</p> |DEPENDENT |nginx.stream.upstream.peer.fails.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].fails.first()`</p><p>- CHANGE_PER_SECOND</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Unavail |<p>How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.</p> |DEPENDENT |nginx.stream.upstream.peer.unavail.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].unavail.first()`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Connections |<p>The total number of client connections forwarded to this server.</p> |DEPENDENT |nginx.stream.upstream.peer.connections.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].connections.first()`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Connect time |<p>The average time to connect to the upstream server.</p> |DEPENDENT |nginx.stream.upstream.peer.connect_time.rate[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].connect_time.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
@@ -167,17 +167,17 @@ There are no template links in this template.
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Health checks, check |<p>The total number of health check requests made.</p> |DEPENDENT |nginx.stream.upstream.peer.health_checks.checks[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].health_checks.checks.first()`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Health checks, fails |<p>The number of failed health checks.</p> |DEPENDENT |nginx.stream.upstream.peer.health_checks.fails[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].health_checks.fails.first()`</p> |
|Nginx |Nginx: Stream upstream [{#UPSTREAM}] peer [{#PEER}]: Health checks, unhealthy |<p>How many times the server became unhealthy (state “unhealthy”).</p> |DEPENDENT |nginx.stream.upstream.peer.health_checks.unhealthy[{#UPSTREAM},{#PEER}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#UPSTREAM}'].peers[?(@.server == '{#PEER}')].health_checks.unhealthy.first()`</p> |
-|Nginx |Nginx: Resolver [{#NAME}]: Requests name, rate |<p>The total number of requests to resolve names to addresses per second.</p> |DEPENDENT |nginx.resolvers.requests.name.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.name`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Requests srv, rate |<p>The total number of requests to resolve SRV records per second.</p> |DEPENDENT |nginx.resolvers.requests.srv.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.srv`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Requests addr, rate |<p>The total number of requests to resolve addresses to names per second.</p> |DEPENDENT |nginx.resolvers.requests.addr.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.addr`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses noerror, rate |<p>The total number of successful responses per second.</p> |DEPENDENT |nginx.resolvers.responses.noerror.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.noerror`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses formerr, rate |<p>The total number of FORMERR (Format error) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.formerr.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.formerr`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses servfail, rate |<p>The total number of SERVFAIL (Server failure) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.servfail.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.servfail`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses nxdomain, rate |<p>The total number of NXDOMAIN (Host not found) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.nxdomain.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.nxdomain`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses notimp, rate |<p>The total number of NOTIMP (Unimplemented) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.notimp.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.notimp`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses refused, rate |<p>The total number of REFUSED (Operation refused) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.refused.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.refused`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses timedout, rate |<p>The total number of timed out requests per second.</p> |DEPENDENT |nginx.resolvers.responses.timedout.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.timedout`</p><p>- CHANGE_PER_SECOND |
-|Nginx |Nginx: Resolver [{#NAME}]: Responses unknown, rate |<p>The total number of requests completed with an unknown error per second.</p> |DEPENDENT |nginx.resolvers.responses.unknown.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.unknown`</p><p>- CHANGE_PER_SECOND |
+|Nginx |Nginx: Resolver [{#NAME}]: Requests name, rate |<p>The total number of requests to resolve names to addresses per second.</p> |DEPENDENT |nginx.resolvers.requests.name.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.name`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Requests srv, rate |<p>The total number of requests to resolve SRV records per second.</p> |DEPENDENT |nginx.resolvers.requests.srv.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.srv`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Requests addr, rate |<p>The total number of requests to resolve addresses to names per second.</p> |DEPENDENT |nginx.resolvers.requests.addr.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].requests.addr`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses noerror, rate |<p>The total number of successful responses per second.</p> |DEPENDENT |nginx.resolvers.responses.noerror.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.noerror`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses formerr, rate |<p>The total number of FORMERR (Format error) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.formerr.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.formerr`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses servfail, rate |<p>The total number of SERVFAIL (Server failure) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.servfail.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.servfail`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses nxdomain, rate |<p>The total number of NXDOMAIN (Host not found) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.nxdomain.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.nxdomain`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses notimp, rate |<p>The total number of NOTIMP (Unimplemented) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.notimp.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.notimp`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses refused, rate |<p>The total number of REFUSED (Operation refused) responses per second.</p> |DEPENDENT |nginx.resolvers.responses.refused.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.refused`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses timedout, rate |<p>The total number of timed out requests per second.</p> |DEPENDENT |nginx.resolvers.responses.timedout.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.timedout`</p><p>- CHANGE_PER_SECOND</p> |
+|Nginx |Nginx: Resolver [{#NAME}]: Responses unknown, rate |<p>The total number of requests completed with an unknown error per second.</p> |DEPENDENT |nginx.resolvers.responses.unknown.rate[{#NAME}]<p>**Preprocessing**:</p><p>- JSONPATH: `$['{#NAME}'].responses.unknown`</p><p>- CHANGE_PER_SECOND</p> |
|Zabbix_raw_items |Nginx: Get info |<p>Return status of nginx running instance.</p> |HTTP_AGENT |nginx.info |
|Zabbix_raw_items |Nginx: Get connections |<p>Returns statistics of client connections.</p> |HTTP_AGENT |nginx.connections |
|Zabbix_raw_items |Nginx: Get SSL |<p>Returns SSL statistics.</p> |HTTP_AGENT |nginx.ssl |
@@ -193,15 +193,15 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Nginx: Server response error (text: {ITEM.VALUE}) |<p>-</p> |`{TEMPLATE_NAME:nginx.info.error.strlen()}>0` |HIGH | |
-|Nginx: Version has changed (new version: {ITEM.VALUE}) |<p>Nginx version has changed. Ack to close.</p> |`{TEMPLATE_NAME:nginx.info.version.diff()}=1 and {TEMPLATE_NAME:nginx.info.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Nginx: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:nginx.info.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|Nginx: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for metrics for the last 30 minutes</p> |`{TEMPLATE_NAME:nginx.info.uptime.nodata(30m)}=1` |WARNING |<p>Manual close: YES</p> |
-|Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) |<p>The dropping rate connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> |`{TEMPLATE_NAME:nginx.connections.dropped.min(5m)} > {$NGINX.DROP_RATE.MAX.WARN}` |WARNING | |
-|Nginx: HTTP upstream server is not in UP or DOWN state. |<p>-</p> |`{TEMPLATE_NAME:nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}].str(up)}=0 and {TEMPLATE_NAME:nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}].str(down)}=0` |WARNING | |
-|Nginx: Too many HTTP requests with code 4xx (over {$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}].sum(5m)} > ({Nginx Plus by HTTP:nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}].sum(5m)}*({$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}/100))` |WARNING | |
-|Nginx: Too many HTTP requests with code 5xx (over {$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}% for 5m) |<p>-</p> |`{TEMPLATE_NAME:nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}].sum(5m)} > ({Nginx Plus by HTTP:nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}].sum(5m)}*({$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}/100))` |HIGH | |
-|Nginx: Stream upstream server is not in UP or DOWN state. |<p>-</p> |`{TEMPLATE_NAME:nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}].str(up)}=0 and {TEMPLATE_NAME:nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}].str(down)}=0` |WARNING | |
+|Nginx: Server response error (text: {ITEM.VALUE}) |<p>-</p> |`length(last(/Nginx Plus by HTTP/nginx.info.error))>0` |HIGH | |
+|Nginx: Version has changed (new version: {ITEM.VALUE}) |<p>Nginx version has changed. Ack to close.</p> |`last(/Nginx Plus by HTTP/nginx.info.version,#1)<>last(/Nginx Plus by HTTP/nginx.info.version,#2) and length(last(/Nginx Plus by HTTP/nginx.info.version))>0` |INFO |<p>Manual close: YES</p> |
+|Nginx: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Nginx Plus by HTTP/nginx.info.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Nginx: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for metrics for the last 30 minutes</p> |`nodata(/Nginx Plus by HTTP/nginx.info.uptime,30m)=1` |WARNING |<p>Manual close: YES</p> |
+|Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m) |<p>The dropping rate connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.</p> |`min(/Nginx Plus by HTTP/nginx.connections.dropped,5m) > {$NGINX.DROP_RATE.MAX.WARN}` |WARNING | |
+|Nginx: HTTP upstream server is not in UP or DOWN state. |<p>-</p> |`find(/Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0` |WARNING | |
+|Nginx: Too many HTTP requests with code 4xx (over {$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}% for 5m) |<p>-</p> |`sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}/100))` |WARNING | |
+|Nginx: Too many HTTP requests with code 5xx (over {$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}% for 5m) |<p>-</p> |`sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}/100))` |HIGH | |
+|Nginx: Stream upstream server is not in UP or DOWN state. |<p>-</p> |`find(/Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0` |WARNING | |
## Feedback
diff --git a/templates/app/nginx_plus_http/template_app_nginx_plus_http.yaml b/templates/app/nginx_plus_http/template_app_nginx_plus_http.yaml
index 5f4c221818e..f77004371e0 100644
--- a/templates/app/nginx_plus_http/template_app_nginx_plus_http.yaml
+++ b/templates/app/nginx_plus_http/template_app_nginx_plus_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-27T13:57:40Z'
+ date: '2021-12-19T15:19:46Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -8,8 +8,8 @@ zabbix_export:
templates:
-
uuid: 8cad0f2564694416b60bd8a414948641
- template: 'Template App Nginx Plus by HTTP'
- name: 'Template App Nginx Plus by HTTP'
+ template: 'Nginx Plus by HTTP'
+ name: 'Nginx Plus by HTTP'
description: |
Get Nginx Plus metrics by HTTP agent.
Metrics are collected by requests to Nginx Plus API.
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -101,7 +101,7 @@ zabbix_export:
triggers:
-
uuid: 91c9dab8147a4d0086019119e4e55b5e
- expression: 'min(/Template App Nginx Plus by HTTP/nginx.connections.dropped,5m) > {$NGINX.DROP_RATE.MAX.WARN}'
+ expression: 'min(/Nginx Plus by HTTP/nginx.connections.dropped,5m) > {$NGINX.DROP_RATE.MAX.WARN}'
name: 'Nginx: High connections drop rate (more than {$NGINX.DROP_RATE.MAX.WARN} for 5m)'
priority: WARNING
description: 'The dropping rate connections is greater than {$NGINX.DROP_RATE.MAX.WARN} for the last 5 minutes.'
@@ -238,7 +238,7 @@ zabbix_export:
triggers:
-
uuid: e444c708371748bd831bba6c8d9a92e0
- expression: 'length(last(/Template App Nginx Plus by HTTP/nginx.info.error))>0'
+ expression: 'length(last(/Nginx Plus by HTTP/nginx.info.error))>0'
name: 'Nginx: Server response error (text: {ITEM.VALUE})'
priority: HIGH
-
@@ -292,14 +292,14 @@ zabbix_export:
triggers:
-
uuid: 396fb4be618e45578566971d5399d611
- expression: 'nodata(/Template App Nginx Plus by HTTP/nginx.info.uptime,30m)=1'
+ expression: 'nodata(/Nginx Plus by HTTP/nginx.info.uptime,30m)=1'
name: 'Nginx: Failed to fetch info data (or no data for 30m)'
priority: WARNING
description: 'Zabbix has not received data for metrics for the last 30 minutes'
manual_close: 'YES'
-
uuid: 95622a29bd58444b9f0bd985db99f922
- expression: 'last(/Template App Nginx Plus by HTTP/nginx.info.uptime)<10m'
+ expression: 'last(/Nginx Plus by HTTP/nginx.info.uptime)<10m'
name: 'Nginx: has been restarted (uptime < 10m)'
priority: INFO
description: 'Uptime is less than 10 minutes'
@@ -332,7 +332,7 @@ zabbix_export:
triggers:
-
uuid: a91ff018fe4f4a1495b08adeb2b0005f
- expression: 'last(/Template App Nginx Plus by HTTP/nginx.info.version,#1)<>last(/Template App Nginx Plus by HTTP/nginx.info.version,#2) and length(last(/Template App Nginx Plus by HTTP/nginx.info.version))>0'
+ expression: 'last(/Nginx Plus by HTTP/nginx.info.version,#1)<>last(/Nginx Plus by HTTP/nginx.info.version,#2) and length(last(/Nginx Plus by HTTP/nginx.info.version))>0'
name: 'Nginx: Version has changed (new version: {ITEM.VALUE})'
priority: INFO
description: 'Nginx version has changed. Ack to close.'
@@ -782,7 +782,7 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.requests.rate[{#NAME}]'
-
uuid: 6b05907e4c544986b977a1b496ca92e2
@@ -792,35 +792,35 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.responses.2xx.rate[{#NAME}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.responses.1xx.rate[{#NAME}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.responses.3xx.rate[{#NAME}]'
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.responses.4xx.rate[{#NAME}]'
-
sortorder: '4'
drawtype: GRADIENT_LINE
color: FC6EA3
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.location_zones.responses.5xx.rate[{#NAME}]'
master_item:
key: nginx.http.location_zones
@@ -836,7 +836,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -1118,7 +1117,7 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.requests.rate[{#NAME}]'
-
uuid: 5870e9a861f046b3946c2464c6c5007d
@@ -1128,35 +1127,35 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.responses.2xx.rate[{#NAME}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.responses.1xx.rate[{#NAME}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.responses.3xx.rate[{#NAME}]'
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.responses.4xx.rate[{#NAME}]'
-
sortorder: '4'
drawtype: GRADIENT_LINE
color: FC6EA3
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.server_zones.responses.5xx.rate[{#NAME}]'
master_item:
key: nginx.http.server_zones
@@ -1172,7 +1171,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -1572,7 +1570,7 @@ zabbix_export:
trigger_prototypes:
-
uuid: fc6586fe2eef457bac1d71ce0313f95d
- expression: 'find(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0'
+ expression: 'find(/Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Nginx Plus by HTTP/nginx.http.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0'
name: 'Nginx: HTTP upstream server is not in UP or DOWN state.'
opdata: 'Current state: {ITEM.LASTVALUE1}'
priority: WARNING
@@ -1598,12 +1596,12 @@ zabbix_export:
trigger_prototypes:
-
uuid: e7b4782a796b4710b50a96f3d24c8208
- expression: 'sum(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}/100))'
+ expression: 'sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}/100))'
name: 'Nginx: Too many HTTP requests with code 4xx (over {$NGINX.HTTP.UPSTREAM.4XX.MAX.WARN}% for 5m)'
priority: WARNING
-
uuid: f542573d149547d3844f635bf2a87120
- expression: 'sum(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Template App Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}/100))'
+ expression: 'sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}],5m) > (sum(/Nginx Plus by HTTP/nginx.http.upstream.peer.responses.total.rate[{#UPSTREAM},{#PEER}],5m)*({$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}/100))'
name: 'Nginx: Too many HTTP requests with code 5xx (over {$NGINX.HTTP.UPSTREAM.5XX.MAX.WARN}% for 5m)'
priority: HIGH
graph_prototypes:
@@ -1615,21 +1613,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.health_checks.checks[{#UPSTREAM},{#PEER}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.health_checks.fails[{#UPSTREAM},{#PEER}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.health_checks.unhealthy[{#UPSTREAM},{#PEER}]'
-
uuid: 49cd8f25969d486ab4482b4986b1815a
@@ -1639,35 +1637,35 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.responses.2xx.rate[{#UPSTREAM},{#PEER}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.responses.1xx.rate[{#UPSTREAM},{#PEER}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.responses.3xx.rate[{#UPSTREAM},{#PEER}]'
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.responses.4xx.rate[{#UPSTREAM},{#PEER}]'
-
sortorder: '4'
drawtype: GRADIENT_LINE
color: FC6EA3
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.http.upstream.peer.responses.5xx.rate[{#UPSTREAM},{#PEER}]'
master_item:
key: nginx.http.upstreams
@@ -1686,8 +1684,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -1758,7 +1754,7 @@ zabbix_export:
history: 7d
trends: '0'
value_type: CHAR
- description: 'The name of the shared memory zone that keeps the group’s configuration and run-time state.'
+ description: 'The name of the shared memory zone that keeps the group''s configuration and run-time state.'
preprocessing:
-
type: JSONPATH
@@ -1788,7 +1784,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -2074,21 +2069,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.requests.name.rate[{#NAME}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.requests.srv.rate[{#NAME}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.requests.addr.rate[{#NAME}]'
-
uuid: e3535ce974114e6188752937c1bdb2c7
@@ -2098,56 +2093,56 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.noerror.rate[{#NAME}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.formerr.rate[{#NAME}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.servfail.rate[{#NAME}]'
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.nxdomain.rate[{#NAME}]'
-
sortorder: '4'
drawtype: GRADIENT_LINE
color: FC6EA3
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.notimp.rate[{#NAME}]'
-
sortorder: '5'
drawtype: GRADIENT_LINE
color: 6C59DC
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.refused.rate[{#NAME}]'
-
sortorder: '6'
drawtype: GRADIENT_LINE
color: AC8C14
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.timedout.rate[{#NAME}]'
-
sortorder: '7'
drawtype: GRADIENT_LINE
color: 611F27
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.resolvers.responses.unknown.rate[{#NAME}]'
master_item:
key: nginx.resolvers
@@ -2163,7 +2158,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -2399,21 +2393,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.server_zones.sessions.2xx.rate[{#NAME}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.server_zones.sessions.4xx.rate[{#NAME}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.server_zones.sessions.5xx.rate[{#NAME}]'
master_item:
key: nginx.stream.server_zones
@@ -2429,7 +2423,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -2708,7 +2701,7 @@ zabbix_export:
trigger_prototypes:
-
uuid: 20ef9b9e953246ef856dd46033595e52
- expression: 'find(/Template App Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Template App Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0'
+ expression: 'find(/Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","up")=0 and find(/Nginx Plus by HTTP/nginx.stream.upstream.peer.state[{#UPSTREAM},{#PEER}],,"like","down")=0'
name: 'Nginx: Stream upstream server is not in UP or DOWN state.'
opdata: 'Current state: {ITEM.LASTVALUE1}'
priority: WARNING
@@ -2740,21 +2733,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.upstream.peer.health_checks.checks[{#UPSTREAM},{#PEER}]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.upstream.peer.health_checks.fails[{#UPSTREAM},{#PEER}]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: 'nginx.stream.upstream.peer.health_checks.unhealthy[{#UPSTREAM},{#PEER}]'
master_item:
key: nginx.stream.upstreams
@@ -2773,8 +2766,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -2854,7 +2845,6 @@ zabbix_export:
});
return JSON.stringify(output);
-
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
@@ -2932,28 +2922,28 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.connections.accepted.rate
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.connections.dropped
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.connections.active
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.connections.idle
-
uuid: 4be7153e244b4c28a53c527aa9fd5706
@@ -2963,7 +2953,7 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.requests.total.rate
-
uuid: b1c647610c974bc8aad4fb55e1e3c8d3
@@ -2973,21 +2963,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.ssl.handshakes.rate
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.ssl.handshakes_failed.rate
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.ssl.session_reuses.rate
-
uuid: b2da29d4951745db8a66a076eb99d0aa
@@ -2996,5 +2986,5 @@ zabbix_export:
-
color: 1A7C11
item:
- host: 'Template App Nginx Plus by HTTP'
+ host: 'Nginx Plus by HTTP'
key: nginx.info.uptime
diff --git a/templates/app/php-fpm_agent/README.md b/templates/app/php-fpm_agent/README.md
index 335e23fe680..e99c2ce36d2 100644
--- a/templates/app/php-fpm_agent/README.md
+++ b/templates/app/php-fpm_agent/README.md
@@ -113,15 +113,15 @@ There are no template links in this template.
|PHP-FPM |PHP-FPM: Start time |<p>The time when this pool was started.</p> |DEPENDENT |php-fpm.start_time<p>**Preprocessing**:</p><p>- JSONPATH: `$.['start time']`</p> |
|PHP-FPM |PHP-FPM: Processes, total |<p>The total number of server processes currently running.</p> |DEPENDENT |php-fpm.processes_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.['total processes']`</p> |
|PHP-FPM |PHP-FPM: Processes, idle |<p>The total number of idle processes.</p> |DEPENDENT |php-fpm.processes_idle<p>**Preprocessing**:</p><p>- JSONPATH: `$.['idle processes']`</p> |
-|PHP-FPM |PHP-FPM: Queue usage |<p>Queue utilization</p> |CALCULATED |php-fpm.listen_queue_usage<p>**Expression**:</p>`last(php-fpm.listen_queue)/(last(php-fpm.listen_queue_len)+(last(php-fpm.listen_queue_len)=0))*100` |
+|PHP-FPM |PHP-FPM: Queue usage |<p>Queue utilization</p> |CALCULATED |php-fpm.listen_queue_usage<p>**Expression**:</p>`last(//php-fpm.listen_queue)/(last(//php-fpm.listen_queue_len)+(last(//php-fpm.listen_queue_len)=0))*100` |
|PHP-FPM |PHP-FPM: Process manager |<p>The method used by the process manager to control the number of child processes for this pool.</p> |DEPENDENT |php-fpm.process_manager<p>**Preprocessing**:</p><p>- JSONPATH: `$.['process manager']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|PHP-FPM |PHP-FPM: Processes, max active |<p>The highest value that 'active processes' has reached since the php-fpm server started.</p> |DEPENDENT |php-fpm.processes_max_active<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max active processes']`</p> |
-|PHP-FPM |PHP-FPM: Accepted connections per second |<p>The number of accepted requests per second.</p> |DEPENDENT |php-fpm.conn_accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.['accepted conn']`</p><p>- CHANGE_PER_SECOND |
-|PHP-FPM |PHP-FPM: Slow requests |<p>The number of requests that exceeded your request_slowlog_timeout value.</p> |DEPENDENT |php-fpm.slow_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.['slow requests']`</p><p>- SIMPLE_CHANGE |
+|PHP-FPM |PHP-FPM: Accepted connections per second |<p>The number of accepted requests per second.</p> |DEPENDENT |php-fpm.conn_accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.['accepted conn']`</p><p>- CHANGE_PER_SECOND</p> |
+|PHP-FPM |PHP-FPM: Slow requests |<p>The number of requests that exceeded your request_slowlog_timeout value.</p> |DEPENDENT |php-fpm.slow_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.['slow requests']`</p><p>- SIMPLE_CHANGE</p> |
|PHP-FPM |PHP-FPM: Listen queue |<p>The current number of connections that have been initiated, but not yet accepted.</p> |DEPENDENT |php-fpm.listen_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.['listen queue']`</p> |
|PHP-FPM |PHP-FPM: Listen queue, max |<p>The maximum number of requests in the queue of pending connections since this FPM pool has started.</p> |DEPENDENT |php-fpm.listen_queue_max<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max listen queue']`</p> |
|PHP-FPM |PHP-FPM: Listen queue, len |<p>Size of the socket queue of pending connections.</p> |DEPENDENT |php-fpm.listen_queue_len<p>**Preprocessing**:</p><p>- JSONPATH: `$.['listen queue len']`</p> |
-|PHP-FPM |PHP-FPM: Max children reached |<p>The number of times that pm.max_children has been reached since the php-fpm pool started </p> |DEPENDENT |php-fpm.max_children<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max children reached']`</p><p>- SIMPLE_CHANGE |
+|PHP-FPM |PHP-FPM: Max children reached |<p>The number of times that pm.max_children has been reached since the php-fpm pool started </p> |DEPENDENT |php-fpm.max_children<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max children reached']`</p><p>- SIMPLE_CHANGE</p> |
|Zabbix_raw_items |PHP-FPM: php-fpm_ping |<p>-</p> |ZABBIX_PASSIVE |web.page.get["{$PHP_FPM.HOST}","{$PHP_FPM.PING.PAGE}","{$PHP_FPM.PORT}"] |
|Zabbix_raw_items |PHP-FPM: Get status page |<p>-</p> |ZABBIX_PASSIVE |web.page.get["{$PHP_FPM.HOST}","{$PHP_FPM.STATUS.PAGE}?json","{$PHP_FPM.PORT}"]<p>**Preprocessing**:</p><p>- REGEX: `^[.\s\S]*({.+}) \1`</p> |
@@ -129,14 +129,14 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|PHP-FPM: Process is not running |<p>-</p> |`{TEMPLATE_NAME:proc.num["{$PHP_FPM.PROCESS_NAME}"].last()}=0` |HIGH | |
-|PHP-FPM: Service is down |<p>-</p> |`{TEMPLATE_NAME:php-fpm.ping.last()}=0 or {TEMPLATE_NAME:php-fpm.ping.nodata(3m)}=1` |HIGH |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Process is not running</p> |
-|PHP-FPM: Version has changed (new version: {ITEM.VALUE}) |<p>PHP-FPM version has changed. Ack to close.</p> |`{TEMPLATE_NAME:php-fpm.version.diff()}=1 and {TEMPLATE_NAME:php-fpm.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`{TEMPLATE_NAME:php-fpm.uptime.nodata(30m)}=1` |INFO |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Process is not running</p> |
-|PHP-FPM: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:php-fpm.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m) |<p>The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.</p> |`{TEMPLATE_NAME:php-fpm.listen_queue_usage.min(15m)} > {$PHP_FPM.QUEUE.WARN.MAX} ` |WARNING | |
-|PHP-FPM: Manager changed (new value received: {ITEM.VALUE}) |<p>PHP-FPM manager changed. Ack to close.</p> |`{TEMPLATE_NAME:php-fpm.process_manager.diff()}=1` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Detected slow requests |<p>PHP-FPM detected slow request. A slow request means that it took more time to execute than expected (defined in the configuration of your pool).</p> |`{TEMPLATE_NAME:php-fpm.slow_requests.min(#3)}>0 ` |WARNING | |
+|PHP-FPM: Process is not running |<p>-</p> |`last(/PHP-FPM by Zabbix agent/proc.num["{$PHP_FPM.PROCESS_NAME}"])=0` |HIGH | |
+|PHP-FPM: Service is down |<p>-</p> |`last(/PHP-FPM by Zabbix agent/php-fpm.ping)=0 or nodata(/PHP-FPM by Zabbix agent/php-fpm.ping,3m)=1` |HIGH |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Process is not running</p> |
+|PHP-FPM: Version has changed (new version: {ITEM.VALUE}) |<p>PHP-FPM version has changed. Ack to close.</p> |`last(/PHP-FPM by Zabbix agent/php-fpm.version,#1)<>last(/PHP-FPM by Zabbix agent/php-fpm.version,#2) and length(last(/PHP-FPM by Zabbix agent/php-fpm.version))>0` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/PHP-FPM by Zabbix agent/php-fpm.uptime,30m)=1` |INFO |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Process is not running</p> |
+|PHP-FPM: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/PHP-FPM by Zabbix agent/php-fpm.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m) |<p>The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.</p> |`min(/PHP-FPM by Zabbix agent/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}` |WARNING | |
+|PHP-FPM: Manager changed (new value received: {ITEM.VALUE}) |<p>PHP-FPM manager changed. Ack to close.</p> |`last(/PHP-FPM by Zabbix agent/php-fpm.process_manager,#1)<>last(/PHP-FPM by Zabbix agent/php-fpm.process_manager,#2)` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Detected slow requests |<p>PHP-FPM detected slow request. A slow request means that it took more time to execute than expected (defined in the configuration of your pool).</p> |`min(/PHP-FPM by Zabbix agent/php-fpm.slow_requests,#3)>0` |WARNING | |
## Feedback
diff --git a/templates/app/php-fpm_agent/template_app_php-fpm_agent.yaml b/templates/app/php-fpm_agent/template_app_php-fpm_agent.yaml
index c7645e93ec9..ef66c841e69 100644
--- a/templates/app/php-fpm_agent/template_app_php-fpm_agent.yaml
+++ b/templates/app/php-fpm_agent/template_app_php-fpm_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-18T12:22:25Z'
+ date: '2021-12-19T15:19:47Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,7 +13,7 @@ zabbix_export:
description: |
Get PHP-FPM metrics using Zabbix agent running on Linux.
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -116,7 +116,7 @@ zabbix_export:
triggers:
-
uuid: a11264f0e1174b7fb81b457dad1a745f
- expression: 'min(/PHP-FPM by Zabbix agent/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}'
+ expression: 'min(/PHP-FPM by Zabbix agent/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}'
name: 'PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m)'
priority: WARNING
description: 'The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.'
diff --git a/templates/app/php-fpm_http/README.md b/templates/app/php-fpm_http/README.md
index 88222f281ab..ff0af30ebc7 100644
--- a/templates/app/php-fpm_http/README.md
+++ b/templates/app/php-fpm_http/README.md
@@ -107,13 +107,13 @@ There are no template links in this template.
|PHP-FPM |PHP-FPM: Processes, idle |<p>The total number of idle processes.</p> |DEPENDENT |php-fpm.processes_idle<p>**Preprocessing**:</p><p>- JSONPATH: `$.['idle processes']`</p> |
|PHP-FPM |PHP-FPM: Process manager |<p>The method used by the process manager to control the number of child processes for this pool.</p> |DEPENDENT |php-fpm.process_manager<p>**Preprocessing**:</p><p>- JSONPATH: `$.['process manager']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|PHP-FPM |PHP-FPM: Processes, max active |<p>The highest value that 'active processes' has reached since the php-fpm server started.</p> |DEPENDENT |php-fpm.processes_max_active<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max active processes']`</p> |
-|PHP-FPM |PHP-FPM: Accepted connections per second |<p>The number of accepted requests per second.</p> |DEPENDENT |php-fpm.conn_accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.['accepted conn']`</p><p>- CHANGE_PER_SECOND |
-|PHP-FPM |PHP-FPM: Slow requests |<p>The number of requests that exceeded your request_slowlog_timeout value.</p> |DEPENDENT |php-fpm.slow_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.['slow requests']`</p><p>- SIMPLE_CHANGE |
+|PHP-FPM |PHP-FPM: Accepted connections per second |<p>The number of accepted requests per second.</p> |DEPENDENT |php-fpm.conn_accepted.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.['accepted conn']`</p><p>- CHANGE_PER_SECOND</p> |
+|PHP-FPM |PHP-FPM: Slow requests |<p>The number of requests that exceeded your request_slowlog_timeout value.</p> |DEPENDENT |php-fpm.slow_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.['slow requests']`</p><p>- SIMPLE_CHANGE</p> |
|PHP-FPM |PHP-FPM: Listen queue |<p>The current number of connections that have been initiated, but not yet accepted.</p> |DEPENDENT |php-fpm.listen_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.['listen queue']`</p> |
|PHP-FPM |PHP-FPM: Listen queue, max |<p>The maximum number of requests in the queue of pending connections since this FPM pool has started.</p> |DEPENDENT |php-fpm.listen_queue_max<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max listen queue']`</p> |
|PHP-FPM |PHP-FPM: Listen queue, len |<p>Size of the socket queue of pending connections.</p> |DEPENDENT |php-fpm.listen_queue_len<p>**Preprocessing**:</p><p>- JSONPATH: `$.['listen queue len']`</p> |
-|PHP-FPM |PHP-FPM: Queue usage |<p>Queue utilization</p> |CALCULATED |php-fpm.listen_queue_usage<p>**Expression**:</p>`last(php-fpm.listen_queue)/(last(php-fpm.listen_queue_len)+(last(php-fpm.listen_queue_len)=0))*100` |
-|PHP-FPM |PHP-FPM: Max children reached |<p>The number of times that pm.max_children has been reached since the php-fpm pool started </p> |DEPENDENT |php-fpm.max_children<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max children reached']`</p><p>- SIMPLE_CHANGE |
+|PHP-FPM |PHP-FPM: Queue usage |<p>Queue utilization</p> |CALCULATED |php-fpm.listen_queue_usage<p>**Expression**:</p>`last(//php-fpm.listen_queue)/(last(//php-fpm.listen_queue_len)+(last(//php-fpm.listen_queue_len)=0))*100` |
+|PHP-FPM |PHP-FPM: Max children reached |<p>The number of times that pm.max_children has been reached since the php-fpm pool started </p> |DEPENDENT |php-fpm.max_children<p>**Preprocessing**:</p><p>- JSONPATH: `$.['max children reached']`</p><p>- SIMPLE_CHANGE</p> |
|Zabbix_raw_items |PHP-FPM: Get ping page |<p>-</p> |HTTP_AGENT |php-fpm.get_ping |
|Zabbix_raw_items |PHP-FPM: Get status page |<p>-</p> |HTTP_AGENT |php-fpm.get_status |
@@ -121,13 +121,13 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|PHP-FPM: Service is down |<p>-</p> |`{TEMPLATE_NAME:php-fpm.ping.last()}=0 or {TEMPLATE_NAME:php-fpm.ping.nodata(3m)}=1` |HIGH |<p>Manual close: YES</p> |
-|PHP-FPM: Version has changed (new version: {ITEM.VALUE}) |<p>PHP-FPM version has changed. Ack to close.</p> |`{TEMPLATE_NAME:php-fpm.version.diff()}=1 and {TEMPLATE_NAME:php-fpm.version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`{TEMPLATE_NAME:php-fpm.uptime.nodata(30m)}=1` |INFO |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Service is down</p> |
-|PHP-FPM: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:php-fpm.uptime.last()}<10m` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Manager changed (new value received: {ITEM.VALUE}) |<p>PHP-FPM manager changed. Ack to close.</p> |`{TEMPLATE_NAME:php-fpm.process_manager.diff()}=1` |INFO |<p>Manual close: YES</p> |
-|PHP-FPM: Detected slow requests |<p>PHP-FPM detected slow request. A slow request means that it took more time to execute than expected (defined in the configuration of your pool).</p> |`{TEMPLATE_NAME:php-fpm.slow_requests.min(#3)}>0 ` |WARNING | |
-|PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m) |<p>The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.</p> |`{TEMPLATE_NAME:php-fpm.listen_queue_usage.min(15m)} > {$PHP_FPM.QUEUE.WARN.MAX} ` |WARNING | |
+|PHP-FPM: Service is down |<p>-</p> |`last(/PHP-FPM by HTTP/php-fpm.ping)=0 or nodata(/PHP-FPM by HTTP/php-fpm.ping,3m)=1` |HIGH |<p>Manual close: YES</p> |
+|PHP-FPM: Version has changed (new version: {ITEM.VALUE}) |<p>PHP-FPM version has changed. Ack to close.</p> |`last(/PHP-FPM by HTTP/php-fpm.version,#1)<>last(/PHP-FPM by HTTP/php-fpm.version,#2) and length(last(/PHP-FPM by HTTP/php-fpm.version))>0` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Failed to fetch info data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/PHP-FPM by HTTP/php-fpm.uptime,30m)=1` |INFO |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PHP-FPM: Service is down</p> |
+|PHP-FPM: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/PHP-FPM by HTTP/php-fpm.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Manager changed (new value received: {ITEM.VALUE}) |<p>PHP-FPM manager changed. Ack to close.</p> |`last(/PHP-FPM by HTTP/php-fpm.process_manager,#1)<>last(/PHP-FPM by HTTP/php-fpm.process_manager,#2)` |INFO |<p>Manual close: YES</p> |
+|PHP-FPM: Detected slow requests |<p>PHP-FPM detected slow request. A slow request means that it took more time to execute than expected (defined in the configuration of your pool).</p> |`min(/PHP-FPM by HTTP/php-fpm.slow_requests,#3)>0` |WARNING | |
+|PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m) |<p>The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.</p> |`min(/PHP-FPM by HTTP/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}` |WARNING | |
## Feedback
diff --git a/templates/app/php-fpm_http/template_app_php-fpm_http.yaml b/templates/app/php-fpm_http/template_app_php-fpm_http.yaml
index eae11ae6dba..44079d458ae 100644
--- a/templates/app/php-fpm_http/template_app_php-fpm_http.yaml
+++ b/templates/app/php-fpm_http/template_app_php-fpm_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-18T12:21:11Z'
+ date: '2021-12-19T15:19:47Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -13,7 +13,7 @@ zabbix_export:
description: |
Get PHP-FPM metrics using the Zabbix HTTP agent.
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -143,7 +143,7 @@ zabbix_export:
triggers:
-
uuid: 1ad5018fb19d4a9cb9e627f964bfd627
- expression: 'min(/PHP-FPM by HTTP/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}'
+ expression: 'min(/PHP-FPM by HTTP/php-fpm.listen_queue_usage,15m) > {$PHP_FPM.QUEUE.WARN.MAX}'
name: 'PHP-FPM: Queue utilization is high (over {$PHP_FPM.QUEUE.WARN.MAX}% for 15m)'
priority: WARNING
description: 'The queue for this pool reached {$PHP_FPM.QUEUE.WARN.MAX}% of its maximum capacity. Items in queue represent the current number of connections that have been initiated on this pool, but not yet accepted.'
diff --git a/templates/app/rabbitmq_agent/README.md b/templates/app/rabbitmq_agent/README.md
index d06d9907e75..2a32d45a6a2 100644
--- a/templates/app/rabbitmq_agent/README.md
+++ b/templates/app/rabbitmq_agent/README.md
@@ -19,7 +19,7 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/zabbix_agent) for basic instructions.
-Enable the RabbitMQ management plugin. See [RabbitMQ’s documentation](https://www.rabbitmq.com/management.html) to enable it.
+Enable the RabbitMQ management plugin. See [RabbitMQ's documentation](https://www.rabbitmq.com/management.html) to enable it.
Create a user to monitor the service:
@@ -48,14 +48,14 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|---------------------------------------------|------------------------------------------------------------|--------------------|
-| {$RABBITMQ.API.CLUSTER_HOST} | <p>The hostname or IP of RabbitMQ cluster API endpoint</p> | `127.0.0.1` |
-| {$RABBITMQ.API.PASSWORD} | <p>-</p> | `zabbix` |
-| {$RABBITMQ.API.PORT} | <p>The port of RabbitMQ API endpoint</p> | `15672` |
-| {$RABBITMQ.API.USER} | <p>-</p> | `zbx_monitor` |
-| {$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES} | <p>Filter of discoverable exchanges</p> | `.*` |
-| {$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES} | <p>Filter to exclude discovered exchanges</p> | `CHANGE_IF_NEEDED` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$RABBITMQ.API.CLUSTER_HOST} |<p>The hostname or IP of RabbitMQ cluster API endpoint</p> |`127.0.0.1` |
+|{$RABBITMQ.API.PASSWORD} |<p>-</p> |`zabbix` |
+|{$RABBITMQ.API.PORT} |<p>The port of RabbitMQ API endpoint</p> |`15672` |
+|{$RABBITMQ.API.USER} |<p>-</p> |`zbx_monitor` |
+|{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES} |<p>Filter of discoverable exchanges</p> |`.*` |
+|{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES} |<p>Filter to exclude discovered exchanges</p> |`CHANGE_IF_NEEDED` |
## Template links
@@ -63,65 +63,65 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|--------------------------------|-----------------------------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Health Check 3.8.10+ discovery | <p>Version 3.8.10+ specific metrics</p> | DEPENDENT | rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Exchanges discovery | <p>Individual exchange metrics</p> | DEPENDENT | rabbitmq.exchanges.discovery<p>**Filter**:</p>AND <p>- A: {#EXCHANGE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES}`</p><p>- B: {#EXCHANGE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES}`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Health Check 3.8.10+ discovery |<p>Version 3.8.10+ specific metrics</p> |DEPENDENT |rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Exchanges discovery |<p>Individual exchange metrics</p> |DEPENDENT |rabbitmq.exchanges.discovery<p>**Filter**:</p>AND <p>- {#EXCHANGE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES}`</p><p>- {#EXCHANGE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES}`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| RabbitMQ | RabbitMQ: Connections total | <p>Total number of connections</p> | DEPENDENT | rabbitmq.overview.object_totals.connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.connections`</p> |
-| RabbitMQ | RabbitMQ: Channels total | <p>Total number of channels</p> | DEPENDENT | rabbitmq.overview.object_totals.channels<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.channels`</p> |
-| RabbitMQ | RabbitMQ: Queues total | <p>Total number of queues</p> | DEPENDENT | rabbitmq.overview.object_totals.queues<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.queues`</p> |
-| RabbitMQ | RabbitMQ: Consumers total | <p>Total number of consumers</p> | DEPENDENT | rabbitmq.overview.object_totals.consumers<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.consumers`</p> |
-| RabbitMQ | RabbitMQ: Exchanges total | <p>Total number of exchanges</p> | DEPENDENT | rabbitmq.overview.object_totals.exchanges<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.exchanges`</p> |
-| RabbitMQ | RabbitMQ: Messages total | <p>Total number of messages (ready plus unacknowledged)</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages`</p> |
-| RabbitMQ | RabbitMQ: Messages ready for delivery | <p>Number of messages ready for deliver</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages.ready<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_ready`</p> |
-| RabbitMQ | RabbitMQ: Messages unacknowledged | <p>Number of unacknowledged messages</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages.unacknowledged<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_unacknowledged`</p> |
-| RabbitMQ | RabbitMQ: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.overview.messages.ack<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages acknowledged per second | <p>Rate of messages delivered to clients and acknowledged per second</p> | DEPENDENT | rabbitmq.overview.messages.ack.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages confirmed | <p>Count of messages confirmed</p> | DEPENDENT | rabbitmq.overview.messages.confirm<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages confirmed per second | <p>Rate of messages confirmed per second</p> | DEPENDENT | rabbitmq.overview.messages.confirm.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.overview.messages.deliver_get<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.overview.messages.deliver_get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.overview.messages.publish<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages published per second | <p>Rate of messages published per second</p> | DEPENDENT | rabbitmq.overview.messages.publish.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_in | <p>Count of messages published from channels into this overview</p> | DEPENDENT | rabbitmq.overview.messages.publish_in<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_in per second | <p>Rate of messages published from channels into this overview per sec</p> | DEPENDENT | rabbitmq.overview.messages.publish_in.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_out | <p>Count of messages published from this overview into queues</p> | DEPENDENT | rabbitmq.overview.messages.publish_out<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_out per second | <p>Rate of messages published from this overview into queues per second,0,rabbitmq,total msgs pub out rate</p> | DEPENDENT | rabbitmq.overview.messages.publish_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned unroutable | <p>Count of messages returned to publisher as unroutable</p> | DEPENDENT | rabbitmq.overview.messages.return_unroutable<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned unroutable per second | <p>Rate of messages returned to publisher as unroutable per second</p> | DEPENDENT | rabbitmq.overview.messages.return_unroutable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned redeliver | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.overview.messages.redeliver<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned redeliver per second | <p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> | DEPENDENT | rabbitmq.overview.messages.redeliver.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: alarms in effect in the cluster{#SINGLETON} | <p>Responds a 200 OK if there are no alarms in effect in the cluster, otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/health/checks/alarms{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.exchange.messages.ack["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged per second | <p>Rate of messages delivered to clients and acknowledged per second</p> | DEPENDENT | rabbitmq.exchange.messages.ack.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed | <p>Count of messages confirmed</p> | DEPENDENT | rabbitmq.exchange.messages.confirm["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed per second | <p>Rate of messages confirmed per second</p> | DEPENDENT | rabbitmq.exchange.messages.confirm.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.exchange.messages.deliver_get["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.exchange.messages.deliver_get.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.exchange.messages.publish["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published per second | <p>Rate of messages published per second</p> | DEPENDENT | rabbitmq.exchange.messages.publish.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in | <p>Count of messages published from channels into this overview</p> | DEPENDENT | rabbitmq.exchange.messages.publish_in["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in per second | <p>Rate of messages published from channels into this overview per sec</p> | DEPENDENT | rabbitmq.exchange.messages.publish_in.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out | <p>Count of messages published from this overview into queues</p> | DEPENDENT | rabbitmq.exchange.messages.publish_out["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out per second | <p>Rate of messages published from this overview into queues per second,0,rabbitmq,total msgs pub out rate</p> | DEPENDENT | rabbitmq.exchange.messages.publish_out.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable | <p>Count of messages returned to publisher as unroutable</p> | DEPENDENT | rabbitmq.exchange.messages.return_unroutable["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable per second | <p>Rate of messages returned to publisher as unroutable per second</p> | DEPENDENT | rabbitmq.exchange.messages.return_unroutable.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.exchange.messages.redeliver["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered per second | <p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> | DEPENDENT | rabbitmq.exchange.messages.redeliver.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| Zabbix_raw_items | RabbitMQ: Get overview | <p>The HTTP API endpoint that returns cluster-wide metrics</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/overview"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
-| Zabbix_raw_items | RabbitMQ: Get exchanges | <p>The HTTP API endpoint that returns exchanges metrics</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/exchanges"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|RabbitMQ |RabbitMQ: Connections total |<p>Total number of connections</p> |DEPENDENT |rabbitmq.overview.object_totals.connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.connections`</p> |
+|RabbitMQ |RabbitMQ: Channels total |<p>Total number of channels</p> |DEPENDENT |rabbitmq.overview.object_totals.channels<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.channels`</p> |
+|RabbitMQ |RabbitMQ: Queues total |<p>Total number of queues</p> |DEPENDENT |rabbitmq.overview.object_totals.queues<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.queues`</p> |
+|RabbitMQ |RabbitMQ: Consumers total |<p>Total number of consumers</p> |DEPENDENT |rabbitmq.overview.object_totals.consumers<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.consumers`</p> |
+|RabbitMQ |RabbitMQ: Exchanges total |<p>Total number of exchanges</p> |DEPENDENT |rabbitmq.overview.object_totals.exchanges<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.exchanges`</p> |
+|RabbitMQ |RabbitMQ: Messages total |<p>Total number of messages (ready plus unacknowledged)</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages`</p> |
+|RabbitMQ |RabbitMQ: Messages ready for delivery |<p>Number of messages ready for delivery</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages.ready<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_ready`</p> |
+|RabbitMQ |RabbitMQ: Messages unacknowledged |<p>Number of unacknowledged messages</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages.unacknowledged<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_unacknowledged`</p> |
+|RabbitMQ |RabbitMQ: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.overview.messages.ack<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages acknowledged per second |<p>Rate of messages delivered to clients and acknowledged per second</p> |DEPENDENT |rabbitmq.overview.messages.ack.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages confirmed |<p>Count of messages confirmed</p> |DEPENDENT |rabbitmq.overview.messages.confirm<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages confirmed per second |<p>Rate of messages confirmed per second</p> |DEPENDENT |rabbitmq.overview.messages.confirm.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.overview.messages.deliver_get<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.overview.messages.deliver_get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.overview.messages.publish<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages published per second |<p>Rate of messages published per second</p> |DEPENDENT |rabbitmq.overview.messages.publish.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_in |<p>Count of messages published from channels into this overview</p> |DEPENDENT |rabbitmq.overview.messages.publish_in<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_in per second |<p>Rate of messages published from channels into this overview per second</p> |DEPENDENT |rabbitmq.overview.messages.publish_in.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_out |<p>Count of messages published from this overview into queues</p> |DEPENDENT |rabbitmq.overview.messages.publish_out<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_out per second |<p>Rate of messages published from this overview into queues per second</p> |DEPENDENT |rabbitmq.overview.messages.publish_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned unroutable |<p>Count of messages returned to publisher as unroutable</p> |DEPENDENT |rabbitmq.overview.messages.return_unroutable<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned unroutable per second |<p>Rate of messages returned to publisher as unroutable per second</p> |DEPENDENT |rabbitmq.overview.messages.return_unroutable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned redeliver |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.overview.messages.redeliver<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned redeliver per second |<p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> |DEPENDENT |rabbitmq.overview.messages.redeliver.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: alarms in effect in the cluster{#SINGLETON} |<p>Responds with a 200 OK if there are no alarms in effect in the cluster, otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/health/checks/alarms{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.exchange.messages.ack["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged per second |<p>Rate of messages delivered to clients and acknowledged per second</p> |DEPENDENT |rabbitmq.exchange.messages.ack.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed |<p>Count of messages confirmed</p> |DEPENDENT |rabbitmq.exchange.messages.confirm["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed per second |<p>Rate of messages confirmed per second</p> |DEPENDENT |rabbitmq.exchange.messages.confirm.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.exchange.messages.deliver_get["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.exchange.messages.deliver_get.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.exchange.messages.publish["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published per second |<p>Rate of messages published per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in |<p>Count of messages published from channels into this overview</p> |DEPENDENT |rabbitmq.exchange.messages.publish_in["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in per second |<p>Rate of messages published from channels into this overview per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish_in.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out |<p>Count of messages published from this overview into queues</p> |DEPENDENT |rabbitmq.exchange.messages.publish_out["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out per second |<p>Rate of messages published from this overview into queues per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish_out.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable |<p>Count of messages returned to publisher as unroutable</p> |DEPENDENT |rabbitmq.exchange.messages.return_unroutable["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable per second |<p>Rate of messages returned to publisher as unroutable per second</p> |DEPENDENT |rabbitmq.exchange.messages.return_unroutable.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.exchange.messages.redeliver["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered per second |<p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> |DEPENDENT |rabbitmq.exchange.messages.redeliver.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|Zabbix_raw_items |RabbitMQ: Get overview |<p>The HTTP API endpoint that returns cluster-wide metrics</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/overview"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
+|Zabbix_raw_items |RabbitMQ: Get exchanges |<p>The HTTP API endpoint that returns exchanges metrics</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/exchanges"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|--------------------------------------------------------------|-----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|----------------------------------|
-| RabbitMQ: There are active alarms in the cluster | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/health/checks/alarms{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: Failed to fetch overview data (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/overview"].nodata(30m)}=1` | WARNING | <p>Manual close: YES</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|RabbitMQ: There are active alarms in the cluster |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ cluster by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/health/checks/alarms{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: Failed to fetch overview data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/RabbitMQ cluster by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/overview"],30m)=1` |WARNING |<p>Manual close: YES</p> |
## Feedback
@@ -137,7 +137,7 @@ For Zabbix version: 5.4 and higher
The template to monitor RabbitMQ by Zabbix that work without any external scripts.
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-Template `RabbitMQ Node` — (Zabbix version >= 4.2) collects metrics by polling [RabbitMQ management plugin](https://www.rabbitmq.com/management.html) with Zabbix agent.
+Template `RabbitMQ Node` — (Zabbix version >= 4.2) collects metrics by polling [RabbitMQ management plugin](https://www.rabbitmq.com/management.html) with Zabbix agent.
It also uses Zabbix agent to collect `RabbitMQ` Linux process stats like CPU usage, memory usage and whether process is running or not.
@@ -149,7 +149,7 @@ This template was tested on:
## Setup
-Enable the RabbitMQ management plugin. See [RabbitMQ’s documentation](https://www.rabbitmq.com/management.html) to enable it.
+Enable the RabbitMQ management plugin. See [RabbitMQ's documentation](https://www.rabbitmq.com/management.html) to enable it.
Create a user to monitor the service:
@@ -174,18 +174,18 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|------------------------------------------|-------------------------------------------------------------------------|--------------------|
-| {$RABBITMQ.API.HOST} | <p>The hostname or IP of RabbitMQ API endpoint</p> | `127.0.0.1` |
-| {$RABBITMQ.API.PASSWORD} | <p>-</p> | `zabbix` |
-| {$RABBITMQ.API.PORT} | <p>The port of RabbitMQ API endpoint</p> | `15672` |
-| {$RABBITMQ.API.USER} | <p>-</p> | `zbx_monitor` |
-| {$RABBITMQ.CLUSTER.NAME} | <p>The name of RabbitMQ cluster</p> | `rabbit` |
-| {$RABBITMQ.LLD.FILTER.QUEUE.MATCHES} | <p>Filter of discoverable queues</p> | `.*` |
-| {$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES} | <p>Filter to exclude discovered queues</p> | `CHANGE_IF_NEEDED` |
-| {$RABBITMQ.MESSAGES.MAX.WARN} | <p>Maximum number of messages in the queue for trigger expression</p> | `1000` |
-| {$RABBITMQ.PROCESS_NAME} | <p>RabbitMQ server process name</p> | `beam.smp` |
-| {$RABBITMQ.RESPONSE_TIME.MAX.WARN} | <p>Maximum RabbitMQ response time in seconds for trigger expression</p> | `10` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$RABBITMQ.API.HOST} |<p>The hostname or IP of RabbitMQ API endpoint</p> |`127.0.0.1` |
+|{$RABBITMQ.API.PASSWORD} |<p>-</p> |`zabbix` |
+|{$RABBITMQ.API.PORT} |<p>The port of RabbitMQ API endpoint</p> |`15672` |
+|{$RABBITMQ.API.USER} |<p>-</p> |`zbx_monitor` |
+|{$RABBITMQ.CLUSTER.NAME} |<p>The name of RabbitMQ cluster</p> |`rabbit` |
+|{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES} |<p>Filter of discoverable queues</p> |`.*` |
+|{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES} |<p>Filter to exclude discovered queues</p> |`CHANGE_IF_NEEDED` |
+|{$RABBITMQ.MESSAGES.MAX.WARN} |<p>Maximum number of messages in the queue for trigger expression</p> |`1000` |
+|{$RABBITMQ.PROCESS_NAME} |<p>RabbitMQ server process name</p> |`beam.smp` |
+|{$RABBITMQ.RESPONSE_TIME.MAX.WARN} |<p>Maximum RabbitMQ response time in seconds for trigger expression</p> |`10` |
## Template links
@@ -193,86 +193,86 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|--------------------------------|-----------------------------------------------------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Health Check 3.8.10+ discovery | <p>Version 3.8.10+ specific metrics</p> | DEPENDENT | rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Health Check 3.8.9- discovery | <p>Specific metrics up to and including version 3.8.4</p> | DEPENDENT | rabbitmq.healthcheck.v389.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Queues discovery | <p>Individual queue metrics</p> | DEPENDENT | rabbitmq.queues.discovery<p>**Filter**:</p>AND <p>- A: {#QUEUE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES}`</p><p>- B: {#QUEUE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES}`</p><p>- C: {#NODE} MATCHES_REGEX `{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Health Check 3.8.10+ discovery |<p>Version 3.8.10+ specific metrics</p> |DEPENDENT |rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Health Check 3.8.9- discovery |<p>Specific metrics up to and including version 3.8.4</p> |DEPENDENT |rabbitmq.healthcheck.v389.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Queues discovery |<p>Individual queue metrics</p> |DEPENDENT |rabbitmq.queues.discovery<p>**Filter**:</p>AND <p>- {#QUEUE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES}`</p><p>- {#QUEUE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES}`</p><p>- {#NODE} MATCHES_REGEX `{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| RabbitMQ | RabbitMQ: Management plugin version | <p>Version of the management plugin in use</p> | DEPENDENT | rabbitmq.node.overview.management_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| RabbitMQ | RabbitMQ: RabbitMQ version | <p>Version of RabbitMQ on the node which processed this request</p> | DEPENDENT | rabbitmq.node.overview.rabbitmq_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.rabbitmq_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| RabbitMQ | RabbitMQ: Used file descriptors | <p>Used file descriptors</p> | DEPENDENT | rabbitmq.node.fd_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.fd_used`</p> |
-| RabbitMQ | RabbitMQ: Free disk space | <p>Current free disk space</p> | DEPENDENT | rabbitmq.node.disk_free<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free`</p> |
-| RabbitMQ | RabbitMQ: Memory used | <p>Memory used in bytes</p> | DEPENDENT | rabbitmq.node.mem_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_used`</p> |
-| RabbitMQ | RabbitMQ: Memory limit | <p>Memory usage high watermark in bytes</p> | DEPENDENT | rabbitmq.node.mem_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_limit`</p> |
-| RabbitMQ | RabbitMQ: Disk free limit | <p>Disk free space limit in bytes</p> | DEPENDENT | rabbitmq.node.disk_free_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_limit`</p> |
-| RabbitMQ | RabbitMQ: Runtime run queue | <p>Average number of Erlang processes waiting to run</p> | DEPENDENT | rabbitmq.node.run_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.run_queue`</p> |
-| RabbitMQ | RabbitMQ: Sockets used | <p>Number of file descriptors used as sockets</p> | DEPENDENT | rabbitmq.node.sockets_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_used`</p> |
-| RabbitMQ | RabbitMQ: Sockets available | <p>File descriptors available for use as sockets</p> | DEPENDENT | rabbitmq.node.sockets_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_total`</p> |
-| RabbitMQ | RabbitMQ: Number of network partitions | <p>Number of network partitions this node is seeing</p> | DEPENDENT | rabbitmq.node.partitions<p>**Preprocessing**:</p><p>- JSONPATH: `$.partitions`</p><p>- JAVASCRIPT: `return JSON.parse(value).length;`</p> |
-| RabbitMQ | RabbitMQ: Is running | <p>Is the node running or not</p> | DEPENDENT | rabbitmq.node.running<p>**Preprocessing**:</p><p>- JSONPATH: `$.running`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Memory alarm | <p>Does the host has memory alarm</p> | DEPENDENT | rabbitmq.node.mem_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_alarm`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Disk free alarm | <p>Does the node have disk alarm</p> | DEPENDENT | rabbitmq.node.disk_free_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_alarm`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Uptime | <p>Uptime in milliseconds</p> | DEPENDENT | rabbitmq.node.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
-| RabbitMQ | RabbitMQ: Number of processes running | <p>-</p> | ZABBIX_PASSIVE | proc.num["{$RABBITMQ.PROCESS_NAME}"] |
-| RabbitMQ | RabbitMQ: Memory usage (rss) | <p>Resident set size memory used by process in bytes.</p> | ZABBIX_PASSIVE | proc.mem["{$RABBITMQ.PROCESS_NAME}",,,,rss] |
-| RabbitMQ | RabbitMQ: Memory usage (vsize) | <p>Virtual memory size used by process in bytes.</p> | ZABBIX_PASSIVE | proc.mem["{$RABBITMQ.PROCESS_NAME}",,,,vsize] |
-| RabbitMQ | RabbitMQ: CPU utilization | <p>Process CPU utilization percentage.</p> | ZABBIX_PASSIVE | proc.cpu.util["{$RABBITMQ.PROCESS_NAME}"] |
-| RabbitMQ | RabbitMQ: Service ping | <p>-</p> | ZABBIX_PASSIVE | net.tcp.service[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| RabbitMQ | RabbitMQ: Service response time | <p>-</p> | ZABBIX_PASSIVE | net.tcp.service.perf[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"] |
-| RabbitMQ | RabbitMQ: Healthcheck: local alarms in effect on the this node{#SINGLETON} | <p>Responds a 200 OK if there are no local alarms in effect on the target node, otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/local-alarms{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: expiration date on the certificates{#SINGLETON} | <p>Checks the expiration date on the certificates for every listener configured to use TLS. Responds a 200 OK if all certificates are valid (have not expired), otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/certificate-expiration/1/months{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: virtual hosts on the this node{#SINGLETON} | <p>Responds a 200 OK if all virtual hosts and running on the target node, otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/virtual-hosts{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: classic mirrored queues without synchronised mirrors online{#SINGLETON} | <p>Checks if there are classic mirrored queues without synchronised mirrors online (queues that would potentially lose data if the target node is shut down). Responds a 200 OK if there are no such classic mirrored queues, otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-mirror-sync-critical{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: queues with minimum online quorum{#SINGLETON} | <p>Checks if there are quorum queues with minimum online quorum (queues that would lose their quorum and availability if the target node is shut down). Responds a 200 OK if there are no such quorum queues, otherwise responds with a 503 Service Unavailable.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-quorum-critical{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck{#SINGLETON} | <p>Runs basic healthchecks in the current node. Checks that the rabbit application is running, channels and queues can be listed successfully, and that no alarms are in effect.</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/healthchecks/node{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p><p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages | <p>Count of the total messages in the queue</p> | DEPENDENT | rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages per second | <p>Count per second of the total messages in the queue</p> | DEPENDENT | rabbitmq.queue.messages.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Consumers | <p>Number of consumers</p> | DEPENDENT | rabbitmq.queue.consumers["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].consumers.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Memory | <p>Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures</p> | DEPENDENT | rabbitmq.queue.memory["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].memory.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready | <p>Number of messages ready to be delivered to clients</p> | DEPENDENT | rabbitmq.queue.messages_ready["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready per second | <p>Number per second of messages ready to be delivered to clients</p> | DEPENDENT | rabbitmq.queue.messages_ready.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged | <p>Number of messages delivered to clients but not yet acknowledged</p> | DEPENDENT | rabbitmq.queue.messages_unacknowledged["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged per second | <p>Number per second of messages delivered to clients but not yet acknowledged</p> | DEPENDENT | rabbitmq.queue.messages_unacknowledged.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.queue.messages.ack["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged per second | <p>Number per second of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.queue.messages.ack.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered | <p>Count of messages delivered in acknowledgement mode to consumers</p> | DEPENDENT | rabbitmq.queue.messages.deliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second | <p>Count of messages delivered in acknowledgement mode to consumers</p> | DEPENDENT | rabbitmq.queue.messages.deliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.queue.messages.deliver_get["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.queue.messages.deliver_get.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.queue.messages.publish["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published per second | <p>Rate per second of messages published</p> | DEPENDENT | rabbitmq.queue.messages.publish.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.queue.messages.redeliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered per second | <p>Rate per second of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.queue.messages.redeliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| Zabbix_raw_items | RabbitMQ: Get node overview | <p>The HTTP API endpoint that returns cluster-wide metrics</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/overview"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
-| Zabbix_raw_items | RabbitMQ: Get nodes | <p>The HTTP API endpoint that returns nodes metrics</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/nodes/{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}?memory=true"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
-| Zabbix_raw_items | RabbitMQ: Get queues | <p>The HTTP API endpoint that returns queues metrics</p> | ZABBIX_PASSIVE | web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/queues"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|RabbitMQ |RabbitMQ: Management plugin version |<p>Version of the management plugin in use</p> |DEPENDENT |rabbitmq.node.overview.management_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|RabbitMQ |RabbitMQ: RabbitMQ version |<p>Version of RabbitMQ on the node which processed this request</p> |DEPENDENT |rabbitmq.node.overview.rabbitmq_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.rabbitmq_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|RabbitMQ |RabbitMQ: Used file descriptors |<p>Used file descriptors</p> |DEPENDENT |rabbitmq.node.fd_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.fd_used`</p> |
+|RabbitMQ |RabbitMQ: Free disk space |<p>Current free disk space</p> |DEPENDENT |rabbitmq.node.disk_free<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free`</p> |
+|RabbitMQ |RabbitMQ: Memory used |<p>Memory used in bytes</p> |DEPENDENT |rabbitmq.node.mem_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_used`</p> |
+|RabbitMQ |RabbitMQ: Memory limit |<p>Memory usage high watermark in bytes</p> |DEPENDENT |rabbitmq.node.mem_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_limit`</p> |
+|RabbitMQ |RabbitMQ: Disk free limit |<p>Disk free space limit in bytes</p> |DEPENDENT |rabbitmq.node.disk_free_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_limit`</p> |
+|RabbitMQ |RabbitMQ: Runtime run queue |<p>Average number of Erlang processes waiting to run</p> |DEPENDENT |rabbitmq.node.run_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.run_queue`</p> |
+|RabbitMQ |RabbitMQ: Sockets used |<p>Number of file descriptors used as sockets</p> |DEPENDENT |rabbitmq.node.sockets_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_used`</p> |
+|RabbitMQ |RabbitMQ: Sockets available |<p>File descriptors available for use as sockets</p> |DEPENDENT |rabbitmq.node.sockets_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_total`</p> |
+|RabbitMQ |RabbitMQ: Number of network partitions |<p>Number of network partitions this node is seeing</p> |DEPENDENT |rabbitmq.node.partitions<p>**Preprocessing**:</p><p>- JSONPATH: `$.partitions`</p><p>- JAVASCRIPT: `return JSON.parse(value).length;`</p> |
+|RabbitMQ |RabbitMQ: Is running |<p>Is the node running or not</p> |DEPENDENT |rabbitmq.node.running<p>**Preprocessing**:</p><p>- JSONPATH: `$.running`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Memory alarm |<p>Does the host have a memory alarm</p> |DEPENDENT |rabbitmq.node.mem_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_alarm`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Disk free alarm |<p>Does the node have disk alarm</p> |DEPENDENT |rabbitmq.node.disk_free_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_alarm`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Uptime |<p>Uptime in milliseconds</p> |DEPENDENT |rabbitmq.node.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
+|RabbitMQ |RabbitMQ: Number of processes running |<p>-</p> |ZABBIX_PASSIVE |proc.num["{$RABBITMQ.PROCESS_NAME}"] |
+|RabbitMQ |RabbitMQ: Memory usage (rss) |<p>Resident set size memory used by process in bytes.</p> |ZABBIX_PASSIVE |proc.mem["{$RABBITMQ.PROCESS_NAME}",,,,rss] |
+|RabbitMQ |RabbitMQ: Memory usage (vsize) |<p>Virtual memory size used by process in bytes.</p> |ZABBIX_PASSIVE |proc.mem["{$RABBITMQ.PROCESS_NAME}",,,,vsize] |
+|RabbitMQ |RabbitMQ: CPU utilization |<p>Process CPU utilization percentage.</p> |ZABBIX_PASSIVE |proc.cpu.util["{$RABBITMQ.PROCESS_NAME}"] |
+|RabbitMQ |RabbitMQ: Service ping |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|RabbitMQ |RabbitMQ: Service response time |<p>-</p> |ZABBIX_PASSIVE |net.tcp.service.perf[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"] |
+|RabbitMQ |RabbitMQ: Healthcheck: local alarms in effect on this node{#SINGLETON} |<p>Responds a 200 OK if there are no local alarms in effect on the target node, otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/local-alarms{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: expiration date on the certificates{#SINGLETON} |<p>Checks the expiration date on the certificates for every listener configured to use TLS. Responds a 200 OK if all certificates are valid (have not expired), otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/certificate-expiration/1/months{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: virtual hosts on this node{#SINGLETON} |<p>Responds a 200 OK if all virtual hosts are running on the target node, otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/virtual-hosts{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: classic mirrored queues without synchronised mirrors online{#SINGLETON} |<p>Checks if there are classic mirrored queues without synchronised mirrors online (queues that would potentially lose data if the target node is shut down). Responds a 200 OK if there are no such classic mirrored queues, otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-mirror-sync-critical{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: queues with minimum online quorum{#SINGLETON} |<p>Checks if there are quorum queues with minimum online quorum (queues that would lose their quorum and availability if the target node is shut down). Responds a 200 OK if there are no such quorum queues, otherwise responds with a 503 Service Unavailable.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-quorum-critical{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck{#SINGLETON} |<p>Runs basic healthchecks in the current node. Checks that the rabbit application is running, channels and queues can be listed successfully, and that no alarms are in effect.</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/healthchecks/node{#SINGLETON}"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p><p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages |<p>Count of the total messages in the queue</p> |DEPENDENT |rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages per second |<p>Count per second of the total messages in the queue</p> |DEPENDENT |rabbitmq.queue.messages.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Consumers |<p>Number of consumers</p> |DEPENDENT |rabbitmq.queue.consumers["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].consumers.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Memory |<p>Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures</p> |DEPENDENT |rabbitmq.queue.memory["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].memory.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready |<p>Number of messages ready to be delivered to clients</p> |DEPENDENT |rabbitmq.queue.messages_ready["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready per second |<p>Number per second of messages ready to be delivered to clients</p> |DEPENDENT |rabbitmq.queue.messages_ready.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged |<p>Number of messages delivered to clients but not yet acknowledged</p> |DEPENDENT |rabbitmq.queue.messages_unacknowledged["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged per second |<p>Number per second of messages delivered to clients but not yet acknowledged</p> |DEPENDENT |rabbitmq.queue.messages_unacknowledged.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.queue.messages.ack["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged per second |<p>Number per second of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.queue.messages.ack.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered |<p>Count of messages delivered in acknowledgement mode to consumers</p> |DEPENDENT |rabbitmq.queue.messages.deliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second |<p>Rate per second of messages delivered in acknowledgement mode to consumers</p> |DEPENDENT |rabbitmq.queue.messages.deliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.queue.messages.deliver_get["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.queue.messages.deliver_get.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.queue.messages.publish["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published per second |<p>Rate per second of messages published</p> |DEPENDENT |rabbitmq.queue.messages.publish.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.queue.messages.redeliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered per second |<p>Rate per second of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.queue.messages.redeliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|Zabbix_raw_items |RabbitMQ: Get node overview |<p>The HTTP API endpoint that returns cluster-wide metrics</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/overview"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
+|Zabbix_raw_items |RabbitMQ: Get nodes |<p>The HTTP API endpoint that returns nodes metrics</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/nodes/{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}?memory=true"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
+|Zabbix_raw_items |RabbitMQ: Get queues |<p>The HTTP API endpoint that returns queues metrics</p> |ZABBIX_PASSIVE |web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/queues"]<p>**Preprocessing**:</p><p>- REGEX: `\n\s?\n(.*) \1`</p> |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|---------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------------------------------------------------------------------------------------------------------------------|
-| RabbitMQ: Version has changed (new version: {ITEM.VALUE}) | <p>RabbitMQ version has changed. Ack to close.</p> | `{TEMPLATE_NAME:rabbitmq.node.overview.rabbitmq_version.diff()}=1 and {TEMPLATE_NAME:rabbitmq.node.overview.rabbitmq_version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| RabbitMQ: Number of network partitions is too high (more than 0 for 5m) | <p>https://www.rabbitmq.com/partitions.html#detecting</p> | `{TEMPLATE_NAME:rabbitmq.node.partitions.min(5m)}>0` | WARNING | |
-| RabbitMQ: Node is not running | <p>RabbitMQ node is not running</p> | `{TEMPLATE_NAME:rabbitmq.node.running.max(5m)}=0` | AVERAGE | <p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
-| RabbitMQ: Memory alarm (Memory usage threshold has been reached) | <p>https://www.rabbitmq.com/memory.html</p> | `{TEMPLATE_NAME:rabbitmq.node.mem_alarm.last()}=1` | AVERAGE | |
-| RabbitMQ: Free disk space alarm (Free space threshold has been reached) | <p>https://www.rabbitmq.com/disk-alarms.html</p> | `{TEMPLATE_NAME:rabbitmq.node.disk_free_alarm.last()}=1` | AVERAGE | |
-| RabbitMQ: has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:rabbitmq.node.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| RabbitMQ: Process is not running | <p>-</p> | `{TEMPLATE_NAME:proc.num["{$RABBITMQ.PROCESS_NAME}"].last()}=0` | HIGH | |
-| RabbitMQ: Service is down | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p> |
-| RabbitMQ: Service response time is too high (over {$RABBITMQ.RESPONSE_TIME.MAX.WARN}s for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"].min(5m)}>{$RABBITMQ.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
-| RabbitMQ: There are active alarms in the node | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/local-alarms{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: There are valid TLS certificates expiring in the next month | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/certificate-expiration/1/months{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: There are not running virtual hosts | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/virtual-hosts{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: There are queues that could potentially lose data if the this node goes offline. | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-mirror-sync-critical{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: There are queues that would lose their quorum and availability if the this node is shut down. | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-quorum-critical{#SINGLETON}"].last()}=503` | AVERAGE | |
-| RabbitMQ: Node healthcheck failed | <p>https://www.rabbitmq.com/monitoring.html#health-checks</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/healthchecks/node{#SINGLETON}"].last()}=0` | AVERAGE | |
-| RabbitMQ: Too many messages in queue (over {$RABBITMQ.MESSAGES.MAX.WARN} for 5m) | <p>-</p> | `{TEMPLATE_NAME:rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"].min(5m)}>{$RABBITMQ.MESSAGES.MAX.WARN:"{#QUEUE}"}` | WARNING | |
-| RabbitMQ: Failed to fetch nodes data (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/nodes/{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}?memory=true"].nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|RabbitMQ: Version has changed (new version: {ITEM.VALUE}) |<p>RabbitMQ version has changed. Ack to close.</p> |`last(/RabbitMQ node by Zabbix agent/rabbitmq.node.overview.rabbitmq_version,#1)<>last(/RabbitMQ node by Zabbix agent/rabbitmq.node.overview.rabbitmq_version,#2) and length(last(/RabbitMQ node by Zabbix agent/rabbitmq.node.overview.rabbitmq_version))>0` |INFO |<p>Manual close: YES</p> |
+|RabbitMQ: Number of network partitions is too high (more than 0 for 5m) |<p>https://www.rabbitmq.com/partitions.html#detecting</p> |`min(/RabbitMQ node by Zabbix agent/rabbitmq.node.partitions,5m)>0` |WARNING | |
+|RabbitMQ: Node is not running |<p>RabbitMQ node is not running</p> |`max(/RabbitMQ node by Zabbix agent/rabbitmq.node.running,5m)=0` |AVERAGE |<p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
+|RabbitMQ: Memory alarm (Memory usage threshold has been reached) |<p>https://www.rabbitmq.com/memory.html</p> |`last(/RabbitMQ node by Zabbix agent/rabbitmq.node.mem_alarm)=1` |AVERAGE | |
+|RabbitMQ: Free disk space alarm (Free space threshold has been reached) |<p>https://www.rabbitmq.com/disk-alarms.html</p> |`last(/RabbitMQ node by Zabbix agent/rabbitmq.node.disk_free_alarm)=1` |AVERAGE | |
+|RabbitMQ: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/RabbitMQ node by Zabbix agent/rabbitmq.node.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|RabbitMQ: Process is not running |<p>-</p> |`last(/RabbitMQ node by Zabbix agent/proc.num["{$RABBITMQ.PROCESS_NAME}"])=0` |HIGH | |
+|RabbitMQ: Service is down |<p>-</p> |`last(/RabbitMQ node by Zabbix agent/net.tcp.service[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p> |
+|RabbitMQ: Service response time is too high (over {$RABBITMQ.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/RabbitMQ node by Zabbix agent/net.tcp.service.perf[http,"{$RABBITMQ.API.HOST}","{$RABBITMQ.API.PORT}"],5m)>{$RABBITMQ.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
+|RabbitMQ: There are active alarms in the node |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/local-alarms{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: There are valid TLS certificates expiring in the next month |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/certificate-expiration/1/months{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: There are not running virtual hosts |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/virtual-hosts{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: There are queues that could potentially lose data if this node goes offline. |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-mirror-sync-critical{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: There are queues that would lose their quorum and availability if this node is shut down. |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-quorum-critical{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: Node healthcheck failed |<p>https://www.rabbitmq.com/monitoring.html#health-checks</p> |`last(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/healthchecks/node{#SINGLETON}"])=0` |AVERAGE | |
+|RabbitMQ: Too many messages in queue (over {$RABBITMQ.MESSAGES.MAX.WARN} for 5m) |<p>-</p> |`min(/RabbitMQ node by Zabbix agent/rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"],5m)>{$RABBITMQ.MESSAGES.MAX.WARN:"{#QUEUE}"}` |WARNING | |
+|RabbitMQ: Failed to fetch nodes data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/RabbitMQ node by Zabbix agent/web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/nodes/{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}?memory=true"],30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Process is not running</p><p>- RabbitMQ: Service is down</p> |
## Feedback
diff --git a/templates/app/rabbitmq_agent/template_app_rabbitmq_agent.yaml b/templates/app/rabbitmq_agent/template_app_rabbitmq_agent.yaml
index 13601a93885..2fe2d293367 100644
--- a/templates/app/rabbitmq_agent/template_app_rabbitmq_agent.yaml
+++ b/templates/app/rabbitmq_agent/template_app_rabbitmq_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:21Z'
+ date: '2021-12-19T15:19:48Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -15,7 +15,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/387226-discussion-thread-for-official-zabbix-template-rabbitmq
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -950,6 +950,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: alarms in effect in the cluster{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.CLUSTER_HOST}:{$RABBITMQ.API.PORT}/api/health/checks/alarms{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
description: 'Responds a 200 OK if there are no alarms in effect in the cluster, otherwise responds with a 503 Service Unavailable.'
@@ -1001,7 +1002,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version >= 30810 ? [{'{#SINGLETON}': ''}] : []);
macros:
-
@@ -1136,7 +1137,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/387226-discussion-thread-for-official-zabbix-template-rabbitmq
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1753,7 +1754,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version < 30810 ? [{'{#SINGLETON}': ''}] : []);
-
uuid: 3c0021a3e57142eb806deb5b47b6700a
@@ -1768,6 +1769,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: expiration date on the certificates{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/certificate-expiration/1/months{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
description: 'Checks the expiration date on the certificates for every listener configured to use TLS. Responds a 200 OK if all certificates are valid (have not expired), otherwise responds with a 503 Service Unavailable.'
@@ -1810,6 +1812,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: local alarms in effect on the this node{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/local-alarms{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
description: 'Responds a 200 OK if there are no local alarms in effect on the target node, otherwise responds with a 503 Service Unavailable.'
@@ -1852,6 +1855,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: classic mirrored queues without synchronised mirrors online{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-mirror-sync-critical{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
description: 'Checks if there are classic mirrored queues without synchronised mirrors online (queues that would potentially lose data if the target node is shut down). Responds a 200 OK if there are no such classic mirrored queues, otherwise responds with a 503 Service Unavailable.'
@@ -1894,6 +1898,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: queues with minimum online quorum{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/node-is-quorum-critical{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
description: 'Checks if there are quorum queues with minimum online quorum (queues that would lose their quorum and availability if the target node is shut down). Responds a 200 OK if there are no such quorum queues, otherwise responds with a 503 Service Unavailable.'
@@ -1936,6 +1941,7 @@ zabbix_export:
name: 'RabbitMQ: Healthcheck: virtual hosts on the this node{#SINGLETON}'
key: 'web.page.get["http://{$RABBITMQ.API.USER}:{$RABBITMQ.API.PASSWORD}@{$RABBITMQ.API.HOST}:{$RABBITMQ.API.PORT}/api/health/checks/virtual-hosts{#SINGLETON}"]'
history: 7d
+ authtype: NONE
username: '{$RABBITMQ.API.USER}'
password: '{$RABBITMQ.API.PASSWORD}'
               description: 'Responds a 200 OK if all virtual hosts are running on the target node, otherwise responds with a 503 Service Unavailable.'
@@ -1986,7 +1992,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version >= 30810 ? [{'{#SINGLETON}': ''}] : []);
-
uuid: d1d32fc6ae1f445a98144a31214ca825
diff --git a/templates/app/rabbitmq_http/README.md b/templates/app/rabbitmq_http/README.md
index 36d9a386faf..dfec58a27b1 100644
--- a/templates/app/rabbitmq_http/README.md
+++ b/templates/app/rabbitmq_http/README.md
@@ -19,7 +19,7 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/http) for basic instructions.
-Enable the RabbitMQ management plugin. See [RabbitMQ’s documentation](https://www.rabbitmq.com/management.html) to enable it.
+Enable the RabbitMQ management plugin. See [RabbitMQ's documentation](https://www.rabbitmq.com/management.html) to enable it.
Create a user to monitor the service:
@@ -41,14 +41,14 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|---------------------------------------------|--------------------------------------------------|--------------------|
-| {$RABBITMQ.API.PASSWORD} | <p>-</p> | `zabbix` |
-| {$RABBITMQ.API.PORT} | <p>The port of RabbitMQ API endpoint</p> | `15672` |
-| {$RABBITMQ.API.SCHEME} | <p>Request scheme which may be http or https</p> | `http` |
-| {$RABBITMQ.API.USER} | <p>-</p> | `zbx_monitor` |
-| {$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES} | <p>Filter of discoverable exchanges</p> | `.*` |
-| {$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES} | <p>Filter to exclude discovered exchanges</p> | `CHANGE_IF_NEEDED` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$RABBITMQ.API.PASSWORD} |<p>-</p> |`zabbix` |
+|{$RABBITMQ.API.PORT} |<p>The port of RabbitMQ API endpoint</p> |`15672` |
+|{$RABBITMQ.API.SCHEME} |<p>Request scheme which may be http or https</p> |`http` |
+|{$RABBITMQ.API.USER} |<p>-</p> |`zbx_monitor` |
+|{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES} |<p>Filter of discoverable exchanges</p> |`.*` |
+|{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES} |<p>Filter to exclude discovered exchanges</p> |`CHANGE_IF_NEEDED` |
## Template links
@@ -56,65 +56,65 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|--------------------------------|-----------------------------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Health Check 3.8.10+ discovery | <p>Version 3.8.10+ specific metrics</p> | DEPENDENT | rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Exchanges discovery | <p>Individual exchange metrics</p> | DEPENDENT | rabbitmq.exchanges.discovery<p>**Filter**:</p>AND <p>- A: {#EXCHANGE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES}`</p><p>- B: {#EXCHANGE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES}`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Health Check 3.8.10+ discovery |<p>Version 3.8.10+ specific metrics</p> |DEPENDENT |rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Exchanges discovery |<p>Individual exchange metrics</p> |DEPENDENT |rabbitmq.exchanges.discovery<p>**Filter**:</p>AND <p>- {#EXCHANGE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.MATCHES}`</p><p>- {#EXCHANGE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.EXCHANGE.NOT_MATCHES}`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| RabbitMQ | RabbitMQ: Connections total | <p>Total number of connections</p> | DEPENDENT | rabbitmq.overview.object_totals.connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.connections`</p> |
-| RabbitMQ | RabbitMQ: Channels total | <p>Total number of channels</p> | DEPENDENT | rabbitmq.overview.object_totals.channels<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.channels`</p> |
-| RabbitMQ | RabbitMQ: Queues total | <p>Total number of queues</p> | DEPENDENT | rabbitmq.overview.object_totals.queues<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.queues`</p> |
-| RabbitMQ | RabbitMQ: Consumers total | <p>Total number of consumers</p> | DEPENDENT | rabbitmq.overview.object_totals.consumers<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.consumers`</p> |
-| RabbitMQ | RabbitMQ: Exchanges total | <p>Total number of exchanges</p> | DEPENDENT | rabbitmq.overview.object_totals.exchanges<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.exchanges`</p> |
-| RabbitMQ | RabbitMQ: Messages total | <p>Total number of messages (ready plus unacknowledged)</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages`</p> |
-| RabbitMQ | RabbitMQ: Messages ready for delivery | <p>Number of messages ready for deliver</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages.ready<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_ready`</p> |
-| RabbitMQ | RabbitMQ: Messages unacknowledged | <p>Number of unacknowledged messages</p> | DEPENDENT | rabbitmq.overview.queue_totals.messages.unacknowledged<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_unacknowledged`</p> |
-| RabbitMQ | RabbitMQ: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.overview.messages.ack<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages acknowledged per second | <p>Rate of messages delivered to clients and acknowledged per second</p> | DEPENDENT | rabbitmq.overview.messages.ack.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages confirmed | <p>Count of messages confirmed</p> | DEPENDENT | rabbitmq.overview.messages.confirm<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages confirmed per second | <p>Rate of messages confirmed per second</p> | DEPENDENT | rabbitmq.overview.messages.confirm.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.overview.messages.deliver_get<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.overview.messages.deliver_get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.overview.messages.publish<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages published per second | <p>Rate of messages published per second</p> | DEPENDENT | rabbitmq.overview.messages.publish.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_in | <p>Count of messages published from channels into this overview</p> | DEPENDENT | rabbitmq.overview.messages.publish_in<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_in per second | <p>Rate of messages published from channels into this overview per sec</p> | DEPENDENT | rabbitmq.overview.messages.publish_in.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_out | <p>Count of messages published from this overview into queues</p> | DEPENDENT | rabbitmq.overview.messages.publish_out<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages publish_out per second | <p>Rate of messages published from this overview into queues per second,0,rabbitmq,total msgs pub out rate</p> | DEPENDENT | rabbitmq.overview.messages.publish_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned unroutable | <p>Count of messages returned to publisher as unroutable</p> | DEPENDENT | rabbitmq.overview.messages.return_unroutable<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned unroutable per second | <p>Rate of messages returned to publisher as unroutable per second</p> | DEPENDENT | rabbitmq.overview.messages.return_unroutable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned redeliver | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.overview.messages.redeliver<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Messages returned redeliver per second | <p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> | DEPENDENT | rabbitmq.overview.messages.redeliver.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: alarms in effect in the cluster{#SINGLETON} | <p>Responds a 200 OK if there are no alarms in effect in the cluster, otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.alarms[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.exchange.messages.ack["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged per second | <p>Rate of messages delivered to clients and acknowledged per second</p> | DEPENDENT | rabbitmq.exchange.messages.ack.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed | <p>Count of messages confirmed</p> | DEPENDENT | rabbitmq.exchange.messages.confirm["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed per second | <p>Rate of messages confirmed per second</p> | DEPENDENT | rabbitmq.exchange.messages.confirm.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.exchange.messages.deliver_get["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.exchange.messages.deliver_get.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.exchange.messages.publish["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published per second | <p>Rate of messages published per second</p> | DEPENDENT | rabbitmq.exchange.messages.publish.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in | <p>Count of messages published from channels into this overview</p> | DEPENDENT | rabbitmq.exchange.messages.publish_in["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in per second | <p>Rate of messages published from channels into this overview per sec</p> | DEPENDENT | rabbitmq.exchange.messages.publish_in.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out | <p>Count of messages published from this overview into queues</p> | DEPENDENT | rabbitmq.exchange.messages.publish_out["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out per second | <p>Rate of messages published from this overview into queues per second,0,rabbitmq,total msgs pub out rate</p> | DEPENDENT | rabbitmq.exchange.messages.publish_out.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable | <p>Count of messages returned to publisher as unroutable</p> | DEPENDENT | rabbitmq.exchange.messages.return_unroutable["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable per second | <p>Rate of messages returned to publisher as unroutable per second</p> | DEPENDENT | rabbitmq.exchange.messages.return_unroutable.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.exchange.messages.redeliver["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered per second | <p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> | DEPENDENT | rabbitmq.exchange.messages.redeliver.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| Zabbix_raw_items | RabbitMQ: Get overview | <p>The HTTP API endpoint that returns cluster-wide metrics</p> | HTTP_AGENT | rabbitmq.get_overview |
-| Zabbix_raw_items | RabbitMQ: Get exchanges | <p>The HTTP API endpoint that returns exchanges metrics</p> | HTTP_AGENT | rabbitmq.get_exchanges |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|RabbitMQ |RabbitMQ: Connections total |<p>Total number of connections</p> |DEPENDENT |rabbitmq.overview.object_totals.connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.connections`</p> |
+|RabbitMQ |RabbitMQ: Channels total |<p>Total number of channels</p> |DEPENDENT |rabbitmq.overview.object_totals.channels<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.channels`</p> |
+|RabbitMQ |RabbitMQ: Queues total |<p>Total number of queues</p> |DEPENDENT |rabbitmq.overview.object_totals.queues<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.queues`</p> |
+|RabbitMQ |RabbitMQ: Consumers total |<p>Total number of consumers</p> |DEPENDENT |rabbitmq.overview.object_totals.consumers<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.consumers`</p> |
+|RabbitMQ |RabbitMQ: Exchanges total |<p>Total number of exchanges</p> |DEPENDENT |rabbitmq.overview.object_totals.exchanges<p>**Preprocessing**:</p><p>- JSONPATH: `$.object_totals.exchanges`</p> |
+|RabbitMQ |RabbitMQ: Messages total |<p>Total number of messages (ready plus unacknowledged)</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages`</p> |
+|RabbitMQ |RabbitMQ: Messages ready for delivery |<p>Number of messages ready for delivery</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages.ready<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_ready`</p> |
+|RabbitMQ |RabbitMQ: Messages unacknowledged |<p>Number of unacknowledged messages</p> |DEPENDENT |rabbitmq.overview.queue_totals.messages.unacknowledged<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue_totals.messages_unacknowledged`</p> |
+|RabbitMQ |RabbitMQ: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.overview.messages.ack<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages acknowledged per second |<p>Rate of messages delivered to clients and acknowledged per second</p> |DEPENDENT |rabbitmq.overview.messages.ack.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.ack_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages confirmed |<p>Count of messages confirmed</p> |DEPENDENT |rabbitmq.overview.messages.confirm<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages confirmed per second |<p>Rate of messages confirmed per second</p> |DEPENDENT |rabbitmq.overview.messages.confirm.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.confirm_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.overview.messages.deliver_get<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.overview.messages.deliver_get.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.deliver_get_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.overview.messages.publish<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages published per second |<p>Rate of messages published per second</p> |DEPENDENT |rabbitmq.overview.messages.publish.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_in |<p>Count of messages published from channels into this overview</p> |DEPENDENT |rabbitmq.overview.messages.publish_in<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_in per second |<p>Rate of messages published from channels into this overview per second</p> |DEPENDENT |rabbitmq.overview.messages.publish_in.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_in_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_out |<p>Count of messages published from this overview into queues</p> |DEPENDENT |rabbitmq.overview.messages.publish_out<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages publish_out per second |<p>Rate of messages published from this overview into queues per second</p> |DEPENDENT |rabbitmq.overview.messages.publish_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.publish_out_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned unroutable |<p>Count of messages returned to publisher as unroutable</p> |DEPENDENT |rabbitmq.overview.messages.return_unroutable<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned unroutable per second |<p>Rate of messages returned to publisher as unroutable per second</p> |DEPENDENT |rabbitmq.overview.messages.return_unroutable.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.return_unroutable_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned redeliver |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.overview.messages.redeliver<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Messages returned redeliver per second |<p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> |DEPENDENT |rabbitmq.overview.messages.redeliver.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.message_stats.redeliver_details.rate`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: alarms in effect in the cluster{#SINGLETON} |<p>Responds with a 200 OK if there are no alarms in effect in the cluster, otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.alarms[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.exchange.messages.ack["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages acknowledged per second |<p>Rate of messages delivered to clients and acknowledged per second</p> |DEPENDENT |rabbitmq.exchange.messages.ack.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed |<p>Count of messages confirmed</p> |DEPENDENT |rabbitmq.exchange.messages.confirm["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages confirmed per second |<p>Rate of messages confirmed per second</p> |DEPENDENT |rabbitmq.exchange.messages.confirm.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.confirm_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.exchange.messages.deliver_get["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.exchange.messages.deliver_get.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.exchange.messages.publish["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages published per second |<p>Rate of messages published per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in |<p>Count of messages published from channels into this overview</p> |DEPENDENT |rabbitmq.exchange.messages.publish_in["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_in per second |<p>Rate of messages published from channels into this overview per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish_in.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_in_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out |<p>Count of messages published from this overview into queues</p> |DEPENDENT |rabbitmq.exchange.messages.publish_out["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages publish_out per second |<p>Rate of messages published from this overview into queues per second</p> |DEPENDENT |rabbitmq.exchange.messages.publish_out.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.publish_out_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable |<p>Count of messages returned to publisher as unroutable</p> |DEPENDENT |rabbitmq.exchange.messages.return_unroutable["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages returned unroutable per second |<p>Rate of messages returned to publisher as unroutable per second</p> |DEPENDENT |rabbitmq.exchange.messages.return_unroutable.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.return_unroutable_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.exchange.messages.redeliver["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Exchange {#VHOST}/{#EXCHANGE}/{#TYPE}: Messages redelivered per second |<p>Rate of subset of messages in deliver_get which had the redelivered flag set per second</p> |DEPENDENT |rabbitmq.exchange.messages.redeliver.rate["{#VHOST}/{#EXCHANGE}/{#TYPE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#EXCHANGE}" && @.vhost == "{#VHOST}" && @.type =="{#TYPE}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|Zabbix_raw_items |RabbitMQ: Get overview |<p>The HTTP API endpoint that returns cluster-wide metrics</p> |HTTP_AGENT |rabbitmq.get_overview |
+|Zabbix_raw_items |RabbitMQ: Get exchanges |<p>The HTTP API endpoint that returns exchanges metrics</p> |HTTP_AGENT |rabbitmq.get_exchanges |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|--------------------------------------------------------------|-----------------------------------------------------------------------|------------------------------------------------------------------------|----------|----------------------------------|
-| RabbitMQ: There are active alarms in the cluster | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.alarms[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: Failed to fetch overview data (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes</p> | `{TEMPLATE_NAME:rabbitmq.get_overview.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|RabbitMQ: There are active alarms in the cluster |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ cluster by HTTP/rabbitmq.healthcheck.alarms[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: Failed to fetch overview data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes</p> |`nodata(/RabbitMQ cluster by HTTP/rabbitmq.get_overview,30m)=1` |WARNING |<p>Manual close: YES</p> |
## Feedback
@@ -130,7 +130,7 @@ For Zabbix version: 5.4 and higher
The template to monitor RabbitMQ by Zabbix that works without any external scripts.
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-Template `RabbitMQ Node` — (Zabbix version >= 4.2) collects metrics by polling [RabbitMQ management plugin](https://www.rabbitmq.com/management.html) with HTTP agent remotely.
+Template `RabbitMQ Node` — (Zabbix version >= 4.2) collects metrics by polling [RabbitMQ management plugin](https://www.rabbitmq.com/management.html) with HTTP agent remotely.
@@ -140,7 +140,7 @@ This template was tested on:
## Setup
-Enable the RabbitMQ management plugin. See [RabbitMQ’s documentation](https://www.rabbitmq.com/management.html) to enable it.
+Enable the RabbitMQ management plugin. See [RabbitMQ's documentation](https://www.rabbitmq.com/management.html) to enable it.
Create a user to monitor the service:
@@ -162,17 +162,17 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|------------------------------------------|-------------------------------------------------------------------------|--------------------|
-| {$RABBITMQ.API.PASSWORD} | <p>-</p> | `zabbix` |
-| {$RABBITMQ.API.PORT} | <p>The port of RabbitMQ API endpoint</p> | `15672` |
-| {$RABBITMQ.API.SCHEME} | <p>Request scheme which may be http or https</p> | `http` |
-| {$RABBITMQ.API.USER} | <p>-</p> | `zbx_monitor` |
-| {$RABBITMQ.CLUSTER.NAME} | <p>The name of RabbitMQ cluster</p> | `rabbit` |
-| {$RABBITMQ.LLD.FILTER.QUEUE.MATCHES} | <p>Filter of discoverable queues</p> | `.*` |
-| {$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES} | <p>Filter to exclude discovered queues</p> | `CHANGE_IF_NEEDED` |
-| {$RABBITMQ.MESSAGES.MAX.WARN} | <p>Maximum number of messages in the queue for trigger expression</p> | `1000` |
-| {$RABBITMQ.RESPONSE_TIME.MAX.WARN} | <p>Maximum RabbitMQ response time in seconds for trigger expression</p> | `10` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$RABBITMQ.API.PASSWORD} |<p>-</p> |`zabbix` |
+|{$RABBITMQ.API.PORT} |<p>The port of RabbitMQ API endpoint</p> |`15672` |
+|{$RABBITMQ.API.SCHEME} |<p>Request scheme which may be http or https</p> |`http` |
+|{$RABBITMQ.API.USER} |<p>-</p> |`zbx_monitor` |
+|{$RABBITMQ.CLUSTER.NAME} |<p>The name of RabbitMQ cluster</p> |`rabbit` |
+|{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES} |<p>Filter of discoverable queues</p> |`.*` |
+|{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES} |<p>Filter to exclude discovered queues</p> |`CHANGE_IF_NEEDED` |
+|{$RABBITMQ.MESSAGES.MAX.WARN} |<p>Maximum number of messages in the queue for trigger expression</p> |`1000` |
+|{$RABBITMQ.RESPONSE_TIME.MAX.WARN} |<p>Maximum RabbitMQ response time in seconds for trigger expression</p> |`10` |
## Template links
@@ -180,81 +180,81 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|--------------------------------|-----------------------------------------------------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Health Check 3.8.10+ discovery | <p>Version 3.8.10+ specific metrics</p> | DEPENDENT | rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Health Check 3.8.9- discovery | <p>Specific metrics up to and including version 3.8.4</p> | DEPENDENT | rabbitmq.healthcheck.v389.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Queues discovery | <p>Individual queue metrics</p> | DEPENDENT | rabbitmq.queues.discovery<p>**Filter**:</p>AND <p>- A: {#QUEUE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES}`</p><p>- B: {#QUEUE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES}`</p><p>- C: {#NODE} MATCHES_REGEX `{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Health Check 3.8.10+ discovery |<p>Version 3.8.10+ specific metrics</p> |DEPENDENT |rabbitmq.healthcheck.v3810.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Health Check 3.8.9- discovery |<p>Specific metrics up to and including version 3.8.4</p> |DEPENDENT |rabbitmq.healthcheck.v389.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Queues discovery |<p>Individual queue metrics</p> |DEPENDENT |rabbitmq.queues.discovery<p>**Filter**:</p>AND <p>- {#QUEUE} MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.MATCHES}`</p><p>- {#QUEUE} NOT_MATCHES_REGEX `{$RABBITMQ.LLD.FILTER.QUEUE.NOT_MATCHES}`</p><p>- {#NODE} MATCHES_REGEX `{$RABBITMQ.CLUSTER.NAME}@{HOST.NAME}`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| RabbitMQ | RabbitMQ: Management plugin version | <p>Version of the management plugin in use</p> | DEPENDENT | rabbitmq.node.overview.management_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| RabbitMQ | RabbitMQ: RabbitMQ version | <p>Version of RabbitMQ on the node which processed this request</p> | DEPENDENT | rabbitmq.node.overview.rabbitmq_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.rabbitmq_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-| RabbitMQ | RabbitMQ: Used file descriptors | <p>Used file descriptors</p> | DEPENDENT | rabbitmq.node.fd_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.fd_used`</p> |
-| RabbitMQ | RabbitMQ: Free disk space | <p>Current free disk space</p> | DEPENDENT | rabbitmq.node.disk_free<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free`</p> |
-| RabbitMQ | RabbitMQ: Disk free limit | <p>Disk free space limit in bytes</p> | DEPENDENT | rabbitmq.node.disk_free_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_limit`</p> |
-| RabbitMQ | RabbitMQ: Memory used | <p>Memory used in bytes</p> | DEPENDENT | rabbitmq.node.mem_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_used`</p> |
-| RabbitMQ | RabbitMQ: Memory limit | <p>Memory usage high watermark in bytes</p> | DEPENDENT | rabbitmq.node.mem_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_limit`</p> |
-| RabbitMQ | RabbitMQ: Runtime run queue | <p>Average number of Erlang processes waiting to run</p> | DEPENDENT | rabbitmq.node.run_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.run_queue`</p> |
-| RabbitMQ | RabbitMQ: Sockets used | <p>Number of file descriptors used as sockets</p> | DEPENDENT | rabbitmq.node.sockets_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_used`</p> |
-| RabbitMQ | RabbitMQ: Sockets available | <p>File descriptors available for use as sockets</p> | DEPENDENT | rabbitmq.node.sockets_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_total`</p> |
-| RabbitMQ | RabbitMQ: Number of network partitions | <p>Number of network partitions this node is seeing</p> | DEPENDENT | rabbitmq.node.partitions<p>**Preprocessing**:</p><p>- JSONPATH: `$.partitions`</p><p>- JAVASCRIPT: `return JSON.parse(value).length;`</p> |
-| RabbitMQ | RabbitMQ: Is running | <p>Is the node running or not</p> | DEPENDENT | rabbitmq.node.running<p>**Preprocessing**:</p><p>- JSONPATH: `$.running`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Memory alarm | <p>Does the host has memory alarm</p> | DEPENDENT | rabbitmq.node.mem_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_alarm`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Disk free alarm | <p>Does the node have disk alarm</p> | DEPENDENT | rabbitmq.node.disk_free_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_alarm`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Uptime | <p>Uptime in milliseconds</p> | DEPENDENT | rabbitmq.node.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
-| RabbitMQ | RabbitMQ: Service ping | <p>-</p> | SIMPLE | net.tcp.service[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-| RabbitMQ | RabbitMQ: Service response time | <p>-</p> | SIMPLE | net.tcp.service.perf[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"] |
-| RabbitMQ | RabbitMQ: Healthcheck: local alarms in effect on the this node{#SINGLETON} | <p>Responds a 200 OK if there are no local alarms in effect on the target node, otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.local_alarms[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: expiration date on the certificates{#SINGLETON} | <p>Checks the expiration date on the certificates for every listener configured to use TLS. Responds a 200 OK if all certificates are valid (have not expired), otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.certificate_expiration[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: virtual hosts on the this node{#SINGLETON} | <p>Responds a 200 OK if all virtual hosts and running on the target node, otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.virtual_hosts[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: classic mirrored queues without synchronised mirrors online{#SINGLETON} | <p>Checks if there are classic mirrored queues without synchronised mirrors online (queues that would potentially lose data if the target node is shut down). Responds a 200 OK if there are no such classic mirrored queues, otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.mirror_sync[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck: queues with minimum online quorum{#SINGLETON} | <p>Checks if there are quorum queues with minimum online quorum (queues that would lose their quorum and availability if the target node is shut down). Responds a 200 OK if there are no such quorum queues, otherwise responds with a 503 Service Unavailable.</p> | HTTP_AGENT | rabbitmq.healthcheck.quorum[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| RabbitMQ | RabbitMQ: Healthcheck{#SINGLETON} | <p>Runs basic healthchecks in the current node. Checks that the rabbit application is running, channels and queues can be listed successfully, and that no alarms are in effect.</p> | HTTP_AGENT | rabbitmq.healthcheck[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages | <p>Count of the total messages in the queue</p> | DEPENDENT | rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages per second | <p>Count per second of the total messages in the queue</p> | DEPENDENT | rabbitmq.queue.messages.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Consumers | <p>Number of consumers</p> | DEPENDENT | rabbitmq.queue.consumers["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].consumers.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Memory | <p>Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures</p> | DEPENDENT | rabbitmq.queue.memory["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].memory.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready | <p>Number of messages ready to be delivered to clients</p> | DEPENDENT | rabbitmq.queue.messages_ready["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready per second | <p>Number per second of messages ready to be delivered to clients</p> | DEPENDENT | rabbitmq.queue.messages_ready.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged | <p>Number of messages delivered to clients but not yet acknowledged</p> | DEPENDENT | rabbitmq.queue.messages_unacknowledged["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged per second | <p>Number per second of messages delivered to clients but not yet acknowledged</p> | DEPENDENT | rabbitmq.queue.messages_unacknowledged.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged_details.rate.first()`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged | <p>Number of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.queue.messages.ack["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged per second | <p>Number per second of messages delivered to clients and acknowledged</p> | DEPENDENT | rabbitmq.queue.messages.ack.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered | <p>Count of messages delivered in acknowledgement mode to consumers</p> | DEPENDENT | rabbitmq.queue.messages.deliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second | <p>Count of messages delivered in acknowledgement mode to consumers</p> | DEPENDENT | rabbitmq.queue.messages.deliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered | <p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.queue.messages.deliver_get["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second | <p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> | DEPENDENT | rabbitmq.queue.messages.deliver_get.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published | <p>Count of messages published</p> | DEPENDENT | rabbitmq.queue.messages.publish["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published per second | <p>Rate per second of messages published</p> | DEPENDENT | rabbitmq.queue.messages.publish.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered | <p>Count of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.queue.messages.redeliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| RabbitMQ | RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered per second | <p>Rate per second of subset of messages in deliver_get which had the redelivered flag set</p> | DEPENDENT | rabbitmq.queue.messages.redeliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-| Zabbix_raw_items | RabbitMQ: Get node overview | <p>The HTTP API endpoint that returns cluster-wide metrics</p> | HTTP_AGENT | rabbitmq.get_node_overview |
-| Zabbix_raw_items | RabbitMQ: Get nodes | <p>The HTTP API endpoint that returns nodes metrics</p> | HTTP_AGENT | rabbitmq.get_nodes |
-| Zabbix_raw_items | RabbitMQ: Get queues | <p>The HTTP API endpoint that returns queues metrics</p> | HTTP_AGENT | rabbitmq.get_queues |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|RabbitMQ |RabbitMQ: Management plugin version |<p>Version of the management plugin in use</p> |DEPENDENT |rabbitmq.node.overview.management_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.management_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|RabbitMQ |RabbitMQ: RabbitMQ version |<p>Version of RabbitMQ on the node which processed this request</p> |DEPENDENT |rabbitmq.node.overview.rabbitmq_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.rabbitmq_version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|RabbitMQ |RabbitMQ: Used file descriptors |<p>Used file descriptors</p> |DEPENDENT |rabbitmq.node.fd_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.fd_used`</p> |
+|RabbitMQ |RabbitMQ: Free disk space |<p>Current free disk space</p> |DEPENDENT |rabbitmq.node.disk_free<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free`</p> |
+|RabbitMQ |RabbitMQ: Disk free limit |<p>Disk free space limit in bytes</p> |DEPENDENT |rabbitmq.node.disk_free_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_limit`</p> |
+|RabbitMQ |RabbitMQ: Memory used |<p>Memory used in bytes</p> |DEPENDENT |rabbitmq.node.mem_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_used`</p> |
+|RabbitMQ |RabbitMQ: Memory limit |<p>Memory usage high watermark in bytes</p> |DEPENDENT |rabbitmq.node.mem_limit<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_limit`</p> |
+|RabbitMQ |RabbitMQ: Runtime run queue |<p>Average number of Erlang processes waiting to run</p> |DEPENDENT |rabbitmq.node.run_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.run_queue`</p> |
+|RabbitMQ |RabbitMQ: Sockets used |<p>Number of file descriptors used as sockets</p> |DEPENDENT |rabbitmq.node.sockets_used<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_used`</p> |
+|RabbitMQ |RabbitMQ: Sockets available |<p>File descriptors available for use as sockets</p> |DEPENDENT |rabbitmq.node.sockets_total<p>**Preprocessing**:</p><p>- JSONPATH: `$.sockets_total`</p> |
+|RabbitMQ |RabbitMQ: Number of network partitions |<p>Number of network partitions this node is seeing</p> |DEPENDENT |rabbitmq.node.partitions<p>**Preprocessing**:</p><p>- JSONPATH: `$.partitions`</p><p>- JAVASCRIPT: `return JSON.parse(value).length;`</p> |
+|RabbitMQ |RabbitMQ: Is running |<p>Is the node running or not</p> |DEPENDENT |rabbitmq.node.running<p>**Preprocessing**:</p><p>- JSONPATH: `$.running`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Memory alarm |<p>Does the host have a memory alarm</p> |DEPENDENT |rabbitmq.node.mem_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.mem_alarm`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Disk free alarm |<p>Does the node have disk alarm</p> |DEPENDENT |rabbitmq.node.disk_free_alarm<p>**Preprocessing**:</p><p>- JSONPATH: `$.disk_free_alarm`</p><p>- BOOL_TO_DECIMAL</p> |
+|RabbitMQ |RabbitMQ: Uptime |<p>Uptime in milliseconds</p> |DEPENDENT |rabbitmq.node.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
+|RabbitMQ |RabbitMQ: Service ping |<p>-</p> |SIMPLE |net.tcp.service[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|RabbitMQ |RabbitMQ: Service response time |<p>-</p> |SIMPLE |net.tcp.service.perf[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"] |
+|RabbitMQ |RabbitMQ: Healthcheck: local alarms in effect on this node{#SINGLETON} |<p>Responds a 200 OK if there are no local alarms in effect on the target node, otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.local_alarms[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: expiration date on the certificates{#SINGLETON} |<p>Checks the expiration date on the certificates for every listener configured to use TLS. Responds a 200 OK if all certificates are valid (have not expired), otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.certificate_expiration[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: virtual hosts on this node{#SINGLETON} |<p>Responds a 200 OK if all virtual hosts are running on the target node, otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.virtual_hosts[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: classic mirrored queues without synchronised mirrors online{#SINGLETON} |<p>Checks if there are classic mirrored queues without synchronised mirrors online (queues that would potentially lose data if the target node is shut down). Responds a 200 OK if there are no such classic mirrored queues, otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.mirror_sync[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck: queues with minimum online quorum{#SINGLETON} |<p>Checks if there are quorum queues with minimum online quorum (queues that would lose their quorum and availability if the target node is shut down). Responds a 200 OK if there are no such quorum queues, otherwise responds with a 503 Service Unavailable.</p> |HTTP_AGENT |rabbitmq.healthcheck.quorum[{#SINGLETON}]<p>**Preprocessing**:</p><p>- REGEX: `HTTP\/1\.1\b\s(\d+) \1`</p><p>- JAVASCRIPT: `switch(value){ case '200': return 1 case '503': return 0 default: 2}`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|RabbitMQ |RabbitMQ: Healthcheck{#SINGLETON} |<p>Runs basic healthchecks in the current node. Checks that the rabbit application is running, channels and queues can be listed successfully, and that no alarms are in effect.</p> |HTTP_AGENT |rabbitmq.healthcheck[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.status`</p><p>- BOOL_TO_DECIMAL</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages |<p>Count of the total messages in the queue</p> |DEPENDENT |rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages per second |<p>Count per second of the total messages in the queue</p> |DEPENDENT |rabbitmq.queue.messages.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Consumers |<p>Number of consumers</p> |DEPENDENT |rabbitmq.queue.consumers["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].consumers.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Memory |<p>Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures</p> |DEPENDENT |rabbitmq.queue.memory["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].memory.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready |<p>Number of messages ready to be delivered to clients</p> |DEPENDENT |rabbitmq.queue.messages_ready["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages ready per second |<p>Number per second of messages ready to be delivered to clients</p> |DEPENDENT |rabbitmq.queue.messages_ready.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_ready_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged |<p>Number of messages delivered to clients but not yet acknowledged</p> |DEPENDENT |rabbitmq.queue.messages_unacknowledged["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages unacknowledged per second |<p>Number per second of messages delivered to clients but not yet acknowledged</p> |DEPENDENT |rabbitmq.queue.messages_unacknowledged.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].messages_unacknowledged_details.rate.first()`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged |<p>Number of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.queue.messages.ack["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages acknowledged per second |<p>Number per second of messages delivered to clients and acknowledged</p> |DEPENDENT |rabbitmq.queue.messages.ack.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.ack_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered |<p>Count of messages delivered in acknowledgement mode to consumers</p> |DEPENDENT |rabbitmq.queue.messages.deliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second |<p>Count of messages delivered in acknowledgement mode to consumers</p> |DEPENDENT |rabbitmq.queue.messages.deliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered |<p>Sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.queue.messages.deliver_get["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages delivered per second |<p>Rate per second of the sum of messages delivered in acknowledgement mode to consumers, in no-acknowledgement mode to consumers, in acknowledgement mode in response to basic.get, and in no-acknowledgement mode in response to basic.get</p> |DEPENDENT |rabbitmq.queue.messages.deliver_get.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.deliver_get_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published |<p>Count of messages published</p> |DEPENDENT |rabbitmq.queue.messages.publish["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages published per second |<p>Rate per second of messages published</p> |DEPENDENT |rabbitmq.queue.messages.publish.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.publish_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered |<p>Count of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.queue.messages.redeliver["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|RabbitMQ |RabbitMQ: Queue {#VHOST}/{#QUEUE}: Messages redelivered per second |<p>Rate per second of subset of messages in deliver_get which had the redelivered flag set</p> |DEPENDENT |rabbitmq.queue.messages.redeliver.rate["{#VHOST}/{#QUEUE}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$[?(@.name == "{#QUEUE}" && @.vhost == "{#VHOST}")].message_stats.redeliver_details.rate.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
+|Zabbix_raw_items |RabbitMQ: Get node overview |<p>The HTTP API endpoint that returns cluster-wide metrics</p> |HTTP_AGENT |rabbitmq.get_node_overview |
+|Zabbix_raw_items |RabbitMQ: Get nodes |<p>The HTTP API endpoint that returns nodes metrics</p> |HTTP_AGENT |rabbitmq.get_nodes |
+|Zabbix_raw_items |RabbitMQ: Get queues |<p>The HTTP API endpoint that returns queues metrics</p> |HTTP_AGENT |rabbitmq.get_queues |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|---------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|----------|----------------------------------------------------------------------------------|
-| RabbitMQ: Version has changed (new version: {ITEM.VALUE}) | <p>RabbitMQ version has changed. Ack to close.</p> | `{TEMPLATE_NAME:rabbitmq.node.overview.rabbitmq_version.diff()}=1 and {TEMPLATE_NAME:rabbitmq.node.overview.rabbitmq_version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| RabbitMQ: Number of network partitions is too high (more than 0 for 5m) | <p>https://www.rabbitmq.com/partitions.html#detecting</p> | `{TEMPLATE_NAME:rabbitmq.node.partitions.min(5m)}>0` | WARNING | |
-| RabbitMQ: Node is not running | <p>RabbitMQ node is not running</p> | `{TEMPLATE_NAME:rabbitmq.node.running.max(5m)}=0` | AVERAGE | <p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
-| RabbitMQ: Memory alarm (Memory usage threshold has been reached) | <p>https://www.rabbitmq.com/memory.html</p> | `{TEMPLATE_NAME:rabbitmq.node.mem_alarm.last()}=1` | AVERAGE | |
-| RabbitMQ: Free disk space alarm (Free space threshold has been reached) | <p>https://www.rabbitmq.com/disk-alarms.html</p> | `{TEMPLATE_NAME:rabbitmq.node.disk_free_alarm.last()}=1` | AVERAGE | |
-| RabbitMQ: has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:rabbitmq.node.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| RabbitMQ: Service is down | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"].last()}=0` | AVERAGE | <p>Manual close: YES</p> |
-| RabbitMQ: Service response time is too high (over {$RABBITMQ.RESPONSE_TIME.MAX.WARN}s for 5m) | <p>-</p> | `{TEMPLATE_NAME:net.tcp.service.perf[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"].min(5m)}>{$RABBITMQ.RESPONSE_TIME.MAX.WARN}` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
-| RabbitMQ: There are active alarms in the node | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.local_alarms[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: There are valid TLS certificates expiring in the next month | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.certificate_expiration[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: There are not running virtual hosts | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.virtual_hosts[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: There are queues that could potentially lose data if the this node goes offline. | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.mirror_sync[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: There are queues that would lose their quorum and availability if the this node is shut down. | <p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck.quorum[{#SINGLETON}].last()}=503` | AVERAGE | |
-| RabbitMQ: Node healthcheck failed | <p>https://www.rabbitmq.com/monitoring.html#health-checks</p> | `{TEMPLATE_NAME:rabbitmq.healthcheck[{#SINGLETON}].last()}=0` | AVERAGE | |
-| RabbitMQ: Too many messages in queue (over {$RABBITMQ.MESSAGES.MAX.WARN} for 5m) | <p>-</p> | `{TEMPLATE_NAME:rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"].min(5m)}>{$RABBITMQ.MESSAGES.MAX.WARN:"{#QUEUE}"}` | WARNING | |
-| RabbitMQ: Failed to fetch nodes data (or no data for 30m) | <p>Zabbix has not received data for items for the last 30 minutes.</p> | `{TEMPLATE_NAME:rabbitmq.get_nodes.nodata(30m)}=1` | WARNING | <p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|RabbitMQ: Version has changed (new version: {ITEM.VALUE}) |<p>RabbitMQ version has changed. Ack to close.</p> |`last(/RabbitMQ node by HTTP/rabbitmq.node.overview.rabbitmq_version,#1)<>last(/RabbitMQ node by HTTP/rabbitmq.node.overview.rabbitmq_version,#2) and length(last(/RabbitMQ node by HTTP/rabbitmq.node.overview.rabbitmq_version))>0` |INFO |<p>Manual close: YES</p> |
+|RabbitMQ: Number of network partitions is too high (more than 0 for 5m) |<p>https://www.rabbitmq.com/partitions.html#detecting</p> |`min(/RabbitMQ node by HTTP/rabbitmq.node.partitions,5m)>0` |WARNING | |
+|RabbitMQ: Node is not running |<p>RabbitMQ node is not running</p> |`max(/RabbitMQ node by HTTP/rabbitmq.node.running,5m)=0` |AVERAGE |<p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
+|RabbitMQ: Memory alarm (Memory usage threshold has been reached) |<p>https://www.rabbitmq.com/memory.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.node.mem_alarm)=1` |AVERAGE | |
+|RabbitMQ: Free disk space alarm (Free space threshold has been reached) |<p>https://www.rabbitmq.com/disk-alarms.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.node.disk_free_alarm)=1` |AVERAGE | |
+|RabbitMQ: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/RabbitMQ node by HTTP/rabbitmq.node.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|RabbitMQ: Service is down |<p>-</p> |`last(/RabbitMQ node by HTTP/net.tcp.service[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"])=0` |AVERAGE |<p>Manual close: YES</p> |
+|RabbitMQ: Service response time is too high (over {$RABBITMQ.RESPONSE_TIME.MAX.WARN}s for 5m) |<p>-</p> |`min(/RabbitMQ node by HTTP/net.tcp.service.perf[http,"{HOST.CONN}","{$RABBITMQ.API.PORT}"],5m)>{$RABBITMQ.RESPONSE_TIME.MAX.WARN}` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
+|RabbitMQ: There are active alarms in the node |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck.local_alarms[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: There are valid TLS certificates expiring in the next month |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck.certificate_expiration[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: There are not running virtual hosts |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck.virtual_hosts[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: There are queues that could potentially lose data if this node goes offline. |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck.mirror_sync[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: There are queues that would lose their quorum and availability if this node is shut down. |<p>http://{HOST.CONN}:{$RABBITMQ.API.PORT}/api/index.html</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck.quorum[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: Node healthcheck failed |<p>https://www.rabbitmq.com/monitoring.html#health-checks</p> |`last(/RabbitMQ node by HTTP/rabbitmq.healthcheck[{#SINGLETON}])=0` |AVERAGE | |
+|RabbitMQ: Too many messages in queue (over {$RABBITMQ.MESSAGES.MAX.WARN} for 5m) |<p>-</p> |`min(/RabbitMQ node by HTTP/rabbitmq.queue.messages["{#VHOST}/{#QUEUE}"],5m)>{$RABBITMQ.MESSAGES.MAX.WARN:"{#QUEUE}"}` |WARNING | |
+|RabbitMQ: Failed to fetch nodes data (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/RabbitMQ node by HTTP/rabbitmq.get_nodes,30m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- RabbitMQ: Service is down</p> |
## Feedback
diff --git a/templates/app/rabbitmq_http/template_app_rabbitmq_http.yaml b/templates/app/rabbitmq_http/template_app_rabbitmq_http.yaml
index 3a148a7f0f3..7f31ad9be4d 100644
--- a/templates/app/rabbitmq_http/template_app_rabbitmq_http.yaml
+++ b/templates/app/rabbitmq_http/template_app_rabbitmq_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:17Z'
+ date: '2021-12-19T15:19:49Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -15,7 +15,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/387226-discussion-thread-for-official-zabbix-template-rabbitmq
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1001,7 +1001,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version >= 30810 ? [{'{#SINGLETON}': ''}] : []);
macros:
-
@@ -1136,7 +1136,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/387226-discussion-thread-for-official-zabbix-template-rabbitmq
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -1690,7 +1690,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version < 30810 ? [{'{#SINGLETON}': ''}] : []);
-
uuid: ab89d103c015400fbbeecaaf9bf01400
@@ -1938,7 +1938,7 @@ zabbix_export:
- |
var rabbit_version = parseInt(value.split('.')[0]) * 10000 +
parseInt(value.split('.')[1]) * 100 +
- parseInt(value.split('.')[2])
+ parseInt(value.split('.')[2])
return JSON.stringify(rabbit_version >= 30810 ? [{'{#SINGLETON}': ''}] : []);
-
uuid: cc1d85b6045b44b38f588cd9a4c6c62d
diff --git a/templates/app/sharepoint_http/README.md b/templates/app/sharepoint_http/README.md
index c9f25e7b5f2..2e93c8b51ab 100644
--- a/templates/app/sharepoint_http/README.md
+++ b/templates/app/sharepoint_http/README.md
@@ -16,7 +16,7 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/http) for basic instructions.
-Create a new host.
+Create a new host.
Define macros according to your Sharepoint web portal.
It is recommended to fill in the values of the filter macros to avoid getting redundant data.
@@ -30,12 +30,12 @@ No specific Zabbix configuration is required.
|Name|Description|Default|
|----|-----------|-------|
|{$SHAREPOINT.GET_INTERVAL} |<p>-</p> |`1m` |
-|{$SHAREPOINT.LLD.FILTER.FULL_PATH.MATCHES} |<p>Filter of discoverable dictionaries by full path</p> |`^/` |
-|{$SHAREPOINT.LLD.FILTER.FULL_PATH.NOT_MATCHES} |<p>Filter to exclude discovered dictionaries by full path</p> |`CHANGE_IF_NEEDED` |
-|{$SHAREPOINT.LLD.FILTER.NAME.MATCHES} |<p>Filter of discoverable dictionaries by name</p> |`.*` |
-|{$SHAREPOINT.LLD.FILTER.NAME.NOT_MATCHES} |<p>Filter to exclude discovered dictionaries by name</p> |`CHANGE_IF_NEEDED` |
+|{$SHAREPOINT.LLD.FILTER.FULL_PATH.MATCHES} |<p>Filter of discoverable dictionaries by full path.</p> |`^/` |
+|{$SHAREPOINT.LLD.FILTER.FULL_PATH.NOT_MATCHES} |<p>Filter to exclude discovered dictionaries by full path.</p> |`CHANGE_IF_NEEDED` |
+|{$SHAREPOINT.LLD.FILTER.NAME.MATCHES} |<p>Filter of discoverable dictionaries by name.</p> |`.*` |
+|{$SHAREPOINT.LLD.FILTER.NAME.NOT_MATCHES} |<p>Filter to exclude discovered dictionaries by name.</p> |`CHANGE_IF_NEEDED` |
|{$SHAREPOINT.LLD.FILTER.TYPE.MATCHES} |<p>Filter of discoverable types.</p> |`FOLDER` |
-|{$SHAREPOINT.LLD.FILTER.TYPE.NOT_MATCHES} |<p>Filter to exclude discovered types</p> |`CHANGE_IF_NEEDED` |
+|{$SHAREPOINT.LLD.FILTER.TYPE.NOT_MATCHES} |<p>Filter to exclude discovered types.</p> |`CHANGE_IF_NEEDED` |
|{$SHAREPOINT.LLD_INTERVAL} |<p>-</p> |`3h` |
|{$SHAREPOINT.MAX_HEALT_SCORE} |<p>Must be in the range from 0 to 10</p><p>in details: https://docs.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-wsshp/c60ddeb6-4113-4a73-9e97-26b5c3907d33</p> |`5` |
|{$SHAREPOINT.PASSWORD} |<p>-</p> |`` |
@@ -51,7 +51,7 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Directory discovery |<p>-</p> |SCRIPT |sharepoint.directory.discovery<p>**Filter**:</p>AND <p>- A: {#SHAREPOINT.LLD.NAME} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.NAME.MATCHES}`</p><p>- B: {#SHAREPOINT.LLD.NAME} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.NAME.NOT_MATCHES}`</p><p>- C: {#SHAREPOINT.LLD.FULL_PATH} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.FULL_PATH.MATCHES}`</p><p>- C: {#SHAREPOINT.LLD.FULL_PATH} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.FULL_PATH.NOT_MATCHES}`</p><p>- E: {#SHAREPOINT.LLD.TYPE} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.TYPE.MATCHES}`</p><p>- F: {#SHAREPOINT.LLD.TYPE} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.TYPE.NOT_MATCHES}`</p> |
+|Directory discovery |<p>-</p> |SCRIPT |sharepoint.directory.discovery<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#SHAREPOINT.LLD.NAME} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.NAME.MATCHES}`</p><p>- {#SHAREPOINT.LLD.NAME} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.NAME.NOT_MATCHES}`</p><p>- {#SHAREPOINT.LLD.FULL_PATH} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.FULL_PATH.MATCHES}`</p><p>- {#SHAREPOINT.LLD.FULL_PATH} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.FULL_PATH.NOT_MATCHES}`</p><p>- {#SHAREPOINT.LLD.TYPE} MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.TYPE.MATCHES}`</p><p>- {#SHAREPOINT.LLD.TYPE} NOT_MATCHES_REGEX `{$SHAREPOINT.LLD.FILTER.TYPE.NOT_MATCHES}`</p> |
## Items collected
@@ -60,19 +60,19 @@ There are no template links in this template.
|Sharepoint |Sharepoint: Get directory structure: Status |<p>HTTP response (status) code. Indicates whether the HTTP request was successfully completed. Additional information is available in the server log file.</p> |DEPENDENT |sharepoint.get_dir.status<p>**Preprocessing**:</p><p>- JSONPATH: `$.status`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> DISCARD_VALUE`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Sharepoint |Sharepoint: Get directory structure: Exec time |<p>The time taken to execute the script for obtaining the data structure (in ms). Less is better.</p> |DEPENDENT |sharepoint.get_dir.time<p>**Preprocessing**:</p><p>- JSONPATH: `$.time`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> DISCARD_VALUE`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|Sharepoint |Sharepoint: Health score |<p>This item specifies a value between 0 and 10, where 0 represents a low load and a high ability to process requests and 10 represents a high load and that the server is throttling requests to maintain adequate throughput.</p> |HTTP_AGENT |sharepoint.health_score<p>**Preprocessing**:</p><p>- REGEX: `X-SharePointHealthScore\b:\s(\d+) \1`</p><p>- IN_RANGE: `0 10`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Sharepoint |Sharepoint: Size ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Size of:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.size["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.size`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `24h`</p> |
-|Sharepoint |Sharepoint: Modified ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Date of change:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.modified["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.modified`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Sharepoint |Sharepoint: Created ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Date of creation:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.created["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.created`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|Zabbix_raw_items |Sharepoint: Get directory structure |<p>Used to get directory structure information</p> |SCRIPT |sharepoint.get_dir<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED: ``</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {"status":520,"data":{},"time":0}`</p> |
+|Sharepoint |Sharepoint: Size ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Size of:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.size["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.size`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `24h`</p> |
+|Sharepoint |Sharepoint: Modified ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Date of change:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.modified["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.modified`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|Sharepoint |Sharepoint: Created ({#SHAREPOINT.LLD.FULL_PATH}) |<p>Date of creation:</p><p>{#SHAREPOINT.LLD.FULL_PATH}</p> |DEPENDENT |sharepoint.created["{#SHAREPOINT.LLD.FULL_PATH}"]<p>**Preprocessing**:</p><p>- JSONPATH: `{{#SHAREPOINT.LLD.JSON_PATH}.regsub("(.*)", \1)}.meta.created`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|Zabbix_raw_items |Sharepoint: Get directory structure |<p>Used to get directory structure information</p> |SCRIPT |sharepoint.get_dir<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {"status":520,"data":{},"time":0}`</p><p>**Expression**:</p>`The text is too long. Please see the template.` |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Sharepoint: Error getting directory structure. |<p>Error getting directory structure. Check the Zabbix server log for more details.</p> |`{TEMPLATE_NAME:sharepoint.get_dir.status.last()}<>200 ` |WARNING | |
-|Sharepoint: Server responds slowly to API request |<p>-</p> |`{TEMPLATE_NAME:sharepoint.get_dir.time.last()}>2000 ` |WARNING | |
-|Sharepoint: Bad health score | |`{TEMPLATE_NAME:sharepoint.health_score.last()}>"{$SHAREPOINT.MAX_HEALT_SCORE}" ` |AVERAGE | |
-|Sharepoint: Sharepoint object is changed |<p>Updated date of modification of folder / file </p> |`{TEMPLATE_NAME:sharepoint.modified["{#SHAREPOINT.LLD.FULL_PATH}"].diff()}=1` |INFO |<p>Manual close: YES</p> |
+|Sharepoint: Error getting directory structure. |<p>Error getting directory structure. Check the Zabbix server log for more details.</p> |`last(/Microsoft SharePoint by HTTP/sharepoint.get_dir.status)<>200` |WARNING | |
+|Sharepoint: Server responds slowly to API request |<p>-</p> |`last(/Microsoft SharePoint by HTTP/sharepoint.get_dir.time)>2000` |WARNING | |
+|Sharepoint: Bad health score |<p>-</p> |`last(/Microsoft SharePoint by HTTP/sharepoint.health_score)>"{$SHAREPOINT.MAX_HEALT_SCORE}"` |AVERAGE | |
+|Sharepoint: Sharepoint object is changed |<p>Updated date of modification of folder / file </p> |`last(/Microsoft SharePoint by HTTP/sharepoint.modified["{#SHAREPOINT.LLD.FULL_PATH}"],#1)<>last(/Microsoft SharePoint by HTTP/sharepoint.modified["{#SHAREPOINT.LLD.FULL_PATH}"],#2)` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/sharepoint_http/template_app_sharepoint_http.yaml b/templates/app/sharepoint_http/template_app_sharepoint_http.yaml
index 63238551a89..fafaea28bf9 100644
--- a/templates/app/sharepoint_http/template_app_sharepoint_http.yaml
+++ b/templates/app/sharepoint_http/template_app_sharepoint_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:23Z'
+ date: '2021-12-19T15:19:50Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -14,9 +14,12 @@ zabbix_export:
Overview:
Template receives data via HTTP Agent.
Setup:
- Create a new host.
+ Create a new host.
Define macros according to your Sharepoint web portal.
It is recommended to fill in the values of the filter macros to avoid getting redundant data.
+
+
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -482,34 +485,36 @@ zabbix_export:
-
macro: '{$SHAREPOINT.LLD.FILTER.FULL_PATH.MATCHES}'
value: ^/
- description: 'Filter of discoverable dictionaries by full path'
+ description: 'Filter of discoverable dictionaries by full path.'
-
macro: '{$SHAREPOINT.LLD.FILTER.FULL_PATH.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discovered dictionaries by full path'
+ description: 'Filter to exclude discovered dictionaries by full path.'
-
macro: '{$SHAREPOINT.LLD.FILTER.NAME.MATCHES}'
value: '.*'
- description: 'Filter of discoverable dictionaries by name'
+ description: 'Filter of discoverable dictionaries by name.'
-
macro: '{$SHAREPOINT.LLD.FILTER.NAME.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discovered dictionaries by name'
+ description: 'Filter to exclude discovered dictionaries by name.'
-
macro: '{$SHAREPOINT.LLD.FILTER.TYPE.MATCHES}'
value: FOLDER
- description: 'Filter of discoverable types'
+ description: 'Filter of discoverable types.'
-
macro: '{$SHAREPOINT.LLD.FILTER.TYPE.NOT_MATCHES}'
value: CHANGE_IF_NEEDED
- description: 'Filter to exclude discovered types'
+ description: 'Filter to exclude discovered types.'
-
macro: '{$SHAREPOINT.LLD_INTERVAL}'
value: 3h
-
macro: '{$SHAREPOINT.MAX_HEALT_SCORE}'
value: '5'
- description: 'Must be in the range from 0 to 10 in details: https://docs.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-wsshp/c60ddeb6-4113-4a73-9e97-26b5c3907d33'
+ description: |
+ Must be in the range from 0 to 10
+ in details: https://docs.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-wsshp/c60ddeb6-4113-4a73-9e97-26b5c3907d33
-
macro: '{$SHAREPOINT.PASSWORD}'
type: SECRET_TEXT
diff --git a/templates/app/squid_snmp/README.md b/templates/app/squid_snmp/README.md
index 251c1075d8e..db3ed4d0f1d 100644
--- a/templates/app/squid_snmp/README.md
+++ b/templates/app/squid_snmp/README.md
@@ -11,6 +11,8 @@ This template was tested on:
## Setup
+> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/network_devices) for basic instructions.
+
### Setup Squid
Enable SNMP support following [official documentation](https://wiki.squid-cache.org/Features/Snmp).
Required parameters in squid.conf:
@@ -76,24 +78,24 @@ There are no template links in this template.
|Squid |Squid: Request hit ratio per 1 minute |<p>Byte Hit Ratios</p> |SNMP |squid[cacheRequestHitRatio.1] |
|Squid |Squid: Request hit ratio per 5 minutes |<p>Byte Hit Ratios</p> |SNMP |squid[cacheRequestHitRatio.5] |
|Squid |Squid: Request hit ratio per 1 hour |<p>Byte Hit Ratios</p> |SNMP |squid[cacheRequestHitRatio.60] |
-|Squid |Squid: Sys page faults per second |<p>Page faults with physical I/O</p> |SNMP |squid[cacheSysPageFaults]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: HTTP requests received per second |<p>Number of HTTP requests received</p> |SNMP |squid[cacheProtoClientHttpRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: HTTP traffic received per second |<p>Number of HTTP traffic received from clients</p> |SNMP |squid[cacheHttpInKb]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: HTTP traffic sent per second |<p>Number of HTTP traffic sent to clients</p> |SNMP |squid[cacheHttpOutKb]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: HTTP Hits sent from cache per second |<p>Number of HTTP Hits sent to clients from cache</p> |SNMP |squid[cacheHttpHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: HTTP Errors sent per second |<p>Number of HTTP Errors sent to clients</p> |SNMP |squid[cacheHttpErrors]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: ICP messages sent per second |<p>Number of ICP messages sent</p> |SNMP |squid[cacheIcpPktsSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: ICP messages received per second |<p>Number of ICP messages received</p> |SNMP |squid[cacheIcpPktsRecv]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: ICP traffic transmitted per second |<p>Number of ICP traffic transmitted</p> |SNMP |squid[cacheIcpKbSent]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: ICP traffic received per second |<p>Number of ICP traffic received</p> |SNMP |squid[cacheIcpKbRecv]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: DNS server requests per second |<p>Number of external dns server requests</p> |SNMP |squid[cacheDnsRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: DNS server replies per second |<p>Number of external dns server replies</p> |SNMP |squid[cacheDnsReplies]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: FQDN cache requests per second |<p>Number of FQDN Cache requests</p> |SNMP |squid[cacheFqdnRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: FQDN cache hits per second |<p>Number of FQDN Cache hits</p> |SNMP |squid[cacheFqdnHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: FQDN cache misses per second |<p>Number of FQDN Cache misses</p> |SNMP |squid[cacheFqdnMisses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: IP cache requests per second |<p>Number of IP Cache requests</p> |SNMP |squid[cacheIpRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: IP cache hits per second |<p>Number of IP Cache hits</p> |SNMP |squid[cacheIpHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Squid |Squid: IP cache misses per second |<p>Number of IP Cache misses</p> |SNMP |squid[cacheIpMisses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|Squid |Squid: Sys page faults per second |<p>Page faults with physical I/O</p> |SNMP |squid[cacheSysPageFaults]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: HTTP requests received per second |<p>Number of HTTP requests received</p> |SNMP |squid[cacheProtoClientHttpRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: HTTP traffic received per second |<p>Number of HTTP traffic received from clients</p> |SNMP |squid[cacheHttpInKb]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: HTTP traffic sent per second |<p>Number of HTTP traffic sent to clients</p> |SNMP |squid[cacheHttpOutKb]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: HTTP Hits sent from cache per second |<p>Number of HTTP Hits sent to clients from cache</p> |SNMP |squid[cacheHttpHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: HTTP Errors sent per second |<p>Number of HTTP Errors sent to clients</p> |SNMP |squid[cacheHttpErrors]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: ICP messages sent per second |<p>Number of ICP messages sent</p> |SNMP |squid[cacheIcpPktsSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: ICP messages received per second |<p>Number of ICP messages received</p> |SNMP |squid[cacheIcpPktsRecv]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: ICP traffic transmitted per second |<p>Number of ICP traffic transmitted</p> |SNMP |squid[cacheIcpKbSent]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: ICP traffic received per second |<p>Number of ICP traffic received</p> |SNMP |squid[cacheIcpKbRecv]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: DNS server requests per second |<p>Number of external dns server requests</p> |SNMP |squid[cacheDnsRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: DNS server replies per second |<p>Number of external dns server replies</p> |SNMP |squid[cacheDnsReplies]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: FQDN cache requests per second |<p>Number of FQDN Cache requests</p> |SNMP |squid[cacheFqdnRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: FQDN cache hits per second |<p>Number of FQDN Cache hits</p> |SNMP |squid[cacheFqdnHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: FQDN cache misses per second |<p>Number of FQDN Cache misses</p> |SNMP |squid[cacheFqdnMisses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: IP cache requests per second |<p>Number of IP Cache requests</p> |SNMP |squid[cacheIpRequests]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: IP cache hits per second |<p>Number of IP Cache hits</p> |SNMP |squid[cacheIpHits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Squid |Squid: IP cache misses per second |<p>Number of IP Cache misses</p> |SNMP |squid[cacheIpMisses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|Squid |Squid: Objects count |<p>Number of objects stored by the cache</p> |SNMP |squid[cacheNumObjCount] |
|Squid |Squid: Objects LRU expiration age |<p>Storage LRU Expiration Age</p> |SNMP |squid[cacheCurrentLRUExpiration]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.01`</p> |
|Squid |Squid: Objects unlinkd requests |<p>Requests given to unlinkd</p> |SNMP |squid[cacheCurrentUnlinkRequests] |
@@ -114,13 +116,13 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Squid: Port {$SQUID.HTTP.PORT} is down |<p>-</p> |`{TEMPLATE_NAME:net.tcp.service[tcp,,{$SQUID.HTTP.PORT}].last()}=0` |AVERAGE |<p>Manual close: YES</p> |
-|Squid: Squid has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:squid[cacheUptime].last()}<10m` |INFO |<p>Manual close: YES</p> |
-|Squid: Squid version has been changed |<p>Squid version has changed. Ack to close.</p> |`{TEMPLATE_NAME:squid[cacheVersionId].diff()}=1 and {TEMPLATE_NAME:squid[cacheVersionId].strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Squid: Swap usage is more than low watermark (>{ITEM.VALUE2}%) |<p>-</p> |`{TEMPLATE_NAME:squid[cacheCurrentSwapSize].last()}>{Squid SNMP:squid[cacheSwapLowWM].last()}*{Squid SNMP:squid[cacheSwapMaxSize].last()}/100` |WARNING | |
-|Squid: Swap usage is more than high watermark (>{ITEM.VALUE2}%) |<p>-</p> |`{TEMPLATE_NAME:squid[cacheCurrentSwapSize].last()}>{Squid SNMP:squid[cacheSwapHighWM].last()}*{Squid SNMP:squid[cacheSwapMaxSize].last()}/100` |HIGH | |
-|Squid: Squid is running out of file descriptors (<{$SQUID.FILE.DESC.WARN.MIN}) |<p>-</p> |`{TEMPLATE_NAME:squid[cacheCurrentUnusedFDescrCnt].last()}<{$SQUID.FILE.DESC.WARN.MIN}` |WARNING | |
-|Squid: High sys page faults rate (>{$SQUID.PAGE.FAULT.WARN}% of received HTTP requests) |<p>-</p> |`{TEMPLATE_NAME:squid[cacheSysPageFaults].avg(5m)}>{Squid SNMP:squid[cacheProtoClientHttpRequests].avg(5m)}/100*{$SQUID.PAGE.FAULT.WARN}` |WARNING | |
+|Squid: Port {$SQUID.HTTP.PORT} is down |<p>-</p> |`last(/Squid SNMP/net.tcp.service[tcp,,{$SQUID.HTTP.PORT}])=0` |AVERAGE |<p>Manual close: YES</p> |
+|Squid: Squid has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Squid SNMP/squid[cacheUptime])<10m` |INFO |<p>Manual close: YES</p> |
+|Squid: Squid version has been changed |<p>Squid version has changed. Ack to close.</p> |`last(/Squid SNMP/squid[cacheVersionId],#1)<>last(/Squid SNMP/squid[cacheVersionId],#2) and length(last(/Squid SNMP/squid[cacheVersionId]))>0` |INFO |<p>Manual close: YES</p> |
+|Squid: Swap usage is more than low watermark (>{ITEM.VALUE2}%) |<p>-</p> |`last(/Squid SNMP/squid[cacheCurrentSwapSize])>last(/Squid SNMP/squid[cacheSwapLowWM])*last(/Squid SNMP/squid[cacheSwapMaxSize])/100` |WARNING | |
+|Squid: Swap usage is more than high watermark (>{ITEM.VALUE2}%) |<p>-</p> |`last(/Squid SNMP/squid[cacheCurrentSwapSize])>last(/Squid SNMP/squid[cacheSwapHighWM])*last(/Squid SNMP/squid[cacheSwapMaxSize])/100` |HIGH | |
+|Squid: Squid is running out of file descriptors (<{$SQUID.FILE.DESC.WARN.MIN}) |<p>-</p> |`last(/Squid SNMP/squid[cacheCurrentUnusedFDescrCnt])<{$SQUID.FILE.DESC.WARN.MIN}` |WARNING | |
+|Squid: High sys page faults rate (>{$SQUID.PAGE.FAULT.WARN}% of received HTTP requests) |<p>-</p> |`avg(/Squid SNMP/squid[cacheSysPageFaults],5m)>avg(/Squid SNMP/squid[cacheProtoClientHttpRequests],5m)/100*{$SQUID.PAGE.FAULT.WARN}` |WARNING | |
## Feedback
diff --git a/templates/app/squid_snmp/template_app_squid_snmp.yaml b/templates/app/squid_snmp/template_app_squid_snmp.yaml
index 3021eee9842..8bb3137bb33 100644
--- a/templates/app/squid_snmp/template_app_squid_snmp.yaml
+++ b/templates/app/squid_snmp/template_app_squid_snmp.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-06-18T06:43:40Z'
+ date: '2021-12-19T15:19:51Z'
groups:
-
uuid: 57b7ae836ca64446ba2c296389c009b7
@@ -15,7 +15,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/409339-discussion-thread-for-official-zabbix-template-squid
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Modules
diff --git a/templates/app/systemd/README.md b/templates/app/systemd/README.md
index 3fad871c450..e894686a512 100644
--- a/templates/app/systemd/README.md
+++ b/templates/app/systemd/README.md
@@ -52,8 +52,8 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Service units discovery |<p>Discover systemd service units and their details.</p> |ZABBIX_PASSIVE |systemd.unit.discovery[service]<p>**Filter**:</p>AND <p>- A: {#UNIT.ACTIVESTATE} MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SERVICE.MATCHES}`</p><p>- B: {#UNIT.ACTIVESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SERVICE.NOT_MATCHES}`</p><p>- C: {#UNIT.UNITFILESTATE} MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SERVICE.MATCHES}`</p><p>- D: {#UNIT.UNITFILESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SERVICE.NOT_MATCHES}`</p><p>- E: {#UNIT.NAME} NOT_MATCHES_REGEX `{$SYSTEMD.NAME.SERVICE.NOT_MATCHES}`</p><p>- F: {#UNIT.NAME} MATCHES_REGEX `{$SYSTEMD.NAME.SERVICE.MATCHES}`</p> |
-|Socket units discovery |<p>Discover systemd socket units and their details.</p> |ZABBIX_PASSIVE |systemd.unit.discovery[socket]<p>**Filter**:</p>AND <p>- A: {#UNIT.ACTIVESTATE} MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SOCKET.MATCHES}`</p><p>- B: {#UNIT.ACTIVESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SOCKET.NOT_MATCHES}`</p><p>- C: {#UNIT.UNITFILESTATE} MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SOCKET.MATCHES}`</p><p>- D: {#UNIT.UNITFILESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SOCKET.NOT_MATCHES}`</p><p>- E: {#UNIT.NAME} NOT_MATCHES_REGEX `{$SYSTEMD.NAME.SOCKET.NOT_MATCHES}`</p><p>- F: {#UNIT.NAME} MATCHES_REGEX `{$SYSTEMD.NAME.SOCKET.MATCHES}`</p> |
+|Service units discovery |<p>Discover systemd service units and their details.</p> |ZABBIX_PASSIVE |systemd.unit.discovery[service]<p>**Filter**:</p>AND <p>- {#UNIT.ACTIVESTATE} MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SERVICE.MATCHES}`</p><p>- {#UNIT.ACTIVESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SERVICE.NOT_MATCHES}`</p><p>- {#UNIT.UNITFILESTATE} MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SERVICE.MATCHES}`</p><p>- {#UNIT.UNITFILESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SERVICE.NOT_MATCHES}`</p><p>- {#UNIT.NAME} NOT_MATCHES_REGEX `{$SYSTEMD.NAME.SERVICE.NOT_MATCHES}`</p><p>- {#UNIT.NAME} MATCHES_REGEX `{$SYSTEMD.NAME.SERVICE.MATCHES}`</p> |
+|Socket units discovery |<p>Discover systemd socket units and their details.</p> |ZABBIX_PASSIVE |systemd.unit.discovery[socket]<p>**Filter**:</p>AND <p>- {#UNIT.ACTIVESTATE} MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SOCKET.MATCHES}`</p><p>- {#UNIT.ACTIVESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.ACTIVESTATE.SOCKET.NOT_MATCHES}`</p><p>- {#UNIT.UNITFILESTATE} MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SOCKET.MATCHES}`</p><p>- {#UNIT.UNITFILESTATE} NOT_MATCHES_REGEX `{$SYSTEMD.UNITFILESTATE.SOCKET.NOT_MATCHES}`</p><p>- {#UNIT.NAME} NOT_MATCHES_REGEX `{$SYSTEMD.NAME.SOCKET.NOT_MATCHES}`</p><p>- {#UNIT.NAME} MATCHES_REGEX `{$SYSTEMD.NAME.SOCKET.MATCHES}`</p> |
## Items collected
@@ -62,8 +62,8 @@ There are no template links in this template.
|Systemd |{#UNIT.NAME}: Active state |<p>State value that reflects whether the unit is currently active or not. The following states are currently defined: "active", "reloading", "inactive", "failed", "activating", and "deactivating".</p> |DEPENDENT |systemd.service.active_state["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ActiveState.state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Systemd |{#UNIT.NAME}: Load state |<p>State value that reflects whether the configuration file of this unit has been loaded. The following states are currently defined: "loaded", "error", and "masked".</p> |DEPENDENT |systemd.service.load_state["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.LoadState.state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
|Systemd |{#UNIT.NAME}: Unit file state |<p>Encodes the install state of the unit file of FragmentPath. It currently knows the following states: "enabled", "enabled-runtime", "linked", "linked-runtime", "masked", "masked-runtime", "static", "disabled", and "invalid".</p> |DEPENDENT |systemd.service.unitfile_state["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.UnitFileState.state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `30m`</p> |
-|Systemd |{#UNIT.NAME}: Active time |<p>Number of seconds since unit entered the active state.</p> |DEPENDENT |systemd.service.uptime["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-|Systemd |{#UNIT.NAME}: Connections accepted per sec |<p>The number of accepted socket connections (NAccepted) per second.</p> |DEPENDENT |systemd.socket.conn_accepted.rate["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.NAccepted`</p><p>- CHANGE_PER_SECOND |
+|Systemd |{#UNIT.NAME}: Active time |<p>Number of seconds since unit entered the active state.</p> |DEPENDENT |systemd.service.uptime["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Systemd |{#UNIT.NAME}: Connections accepted per sec |<p>The number of accepted socket connections (NAccepted) per second.</p> |DEPENDENT |systemd.socket.conn_accepted.rate["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.NAccepted`</p><p>- CHANGE_PER_SECOND</p> |
|Systemd |{#UNIT.NAME}: Connections connected |<p>The current number of socket connections (NConnections).</p> |DEPENDENT |systemd.socket.conn_count["{#UNIT.NAME}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.NConnections`</p> |
|Zabbix_raw_items |{#UNIT.NAME}: Get unit info |<p>Returns all properties of a systemd service unit.</p><p> Unit description: {#UNIT.DESCRIPTION}.</p> |ZABBIX_PASSIVE |systemd.unit.get["{#UNIT.NAME}"] |
|Zabbix_raw_items |{#UNIT.NAME}: Get unit info |<p>Returns all properties of a systemd socket unit.</p><p> Unit description: {#UNIT.DESCRIPTION}.</p> |ZABBIX_PASSIVE |systemd.unit.get["{#UNIT.NAME}",Socket] |
@@ -72,8 +72,8 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|{#UNIT.NAME}: Service is not running |<p>-</p> |`{TEMPLATE_NAME:systemd.service.active_state["{#UNIT.NAME}"].last()}<>1` |WARNING |<p>Manual close: YES</p> |
-|{#UNIT.NAME}: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:systemd.service.uptime["{#UNIT.NAME}"].last()}<10m` |INFO |<p>Manual close: YES</p> |
+|{#UNIT.NAME}: Service is not running |<p>-</p> |`last(/Systemd by Zabbix agent 2/systemd.service.active_state["{#UNIT.NAME}"])<>1` |WARNING |<p>Manual close: YES</p> |
+|{#UNIT.NAME}: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Systemd by Zabbix agent 2/systemd.service.uptime["{#UNIT.NAME}"])<10m` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/systemd/template_app_systemd.yaml b/templates/app/systemd/template_app_systemd.yaml
index 7adbe1fe942..fbe656fcd88 100644
--- a/templates/app/systemd/template_app_systemd.yaml
+++ b/templates/app/systemd/template_app_systemd.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-07-08T14:51:22Z'
+ date: '2021-12-19T15:19:51Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -161,7 +161,6 @@ zabbix_export:
return Math.floor(Date.now()/1000)-Number(data.ActiveEnterTimestamp)/1000000;
}
return null;
-
master_item:
key: 'systemd.unit.get["{#UNIT.NAME}"]'
tags:
diff --git a/templates/app/tomcat_jmx/README.md b/templates/app/tomcat_jmx/README.md
index 2337c8b5fb5..96aeccd8402 100644
--- a/templates/app/tomcat_jmx/README.md
+++ b/templates/app/tomcat_jmx/README.md
@@ -1,5 +1,5 @@
-# Apache Tomcat JMX
+# Apache Tomcat by JMX
## Overview
@@ -10,7 +10,6 @@ Official JMX Template for Apache Tomcat.
This template was tested on:
- Apache Tomcat, version 8.5.59
-- Zabbix, version 5.4
## Setup
@@ -47,20 +46,20 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Global request processors discovery |<p>Discovery for GlobalRequesProcessor</p> |JMX |jmx.discovery[beans,"Catalina:type=GlobalRequestProcessor,name=*"]<p>**Filter**:</p>AND <p>- A: {#JMXNAME} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"GlobalRequesProcessor"}`</p><p>- B: {#JMXNAME} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"GlobalRequesProcessor"}`</p> |
-|Protocol handlers discovery |<p>Discovery for ProtocolHandler</p> |JMX |jmx.discovery[attributes,"Catalina:type=ProtocolHandler,port=*"]<p>**Filter**:</p>AND <p>- A: {#JMXATTR} MATCHES_REGEX `^name$`</p> |
-|Thread pools discovery |<p>Discovery for ThreadPool</p> |JMX |jmx.discovery[beans,"Catalina:type=ThreadPool,name=*"]<p>**Filter**:</p>AND <p>- A: {#JMXNAME} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"ThreadPool"}`</p><p>- B: {#JMXNAME} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"ThreadPool"}`</p> |
-|Contexts discovery |<p>Discovery for contexts</p> |JMX |jmx.discovery[beans,"Catalina:type=Manager,host=*,context=*"]<p>**Filter**:</p>AND <p>- A: {#JMXHOST} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"Manager"}`</p><p>- B: {#JMXHOST} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"Manager"}`</p> |
+|Global request processors discovery |<p>Discovery for GlobalRequesProcessor</p> |JMX |jmx.discovery[beans,"Catalina:type=GlobalRequestProcessor,name=*"]<p>**Filter**:</p>AND <p>- {#JMXNAME} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"GlobalRequesProcessor"}`</p><p>- {#JMXNAME} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"GlobalRequesProcessor"}`</p> |
+|Protocol handlers discovery |<p>Discovery for ProtocolHandler</p> |JMX |jmx.discovery[attributes,"Catalina:type=ProtocolHandler,port=*"]<p>**Filter**:</p>AND <p>- {#JMXATTR} MATCHES_REGEX `^name$`</p> |
+|Thread pools discovery |<p>Discovery for ThreadPool</p> |JMX |jmx.discovery[beans,"Catalina:type=ThreadPool,name=*"]<p>**Filter**:</p>AND <p>- {#JMXNAME} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"ThreadPool"}`</p><p>- {#JMXNAME} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"ThreadPool"}`</p> |
+|Contexts discovery |<p>Discovery for contexts</p> |JMX |jmx.discovery[beans,"Catalina:type=Manager,host=*,context=*"]<p>**Filter**:</p>AND <p>- {#JMXHOST} MATCHES_REGEX `{$TOMCAT.LLD.FILTER.MATCHES:"Manager"}`</p><p>- {#JMXHOST} NOT_MATCHES_REGEX `{$TOMCAT.LLD.FILTER.NOT_MATCHES:"Manager"}`</p> |
## Items collected
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
|Tomcat |Tomcat: Version |<p>The version of the Tomcat.</p> |JMX |jmx["Catalina:type=Server",serverInfo]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Tomcat |{#JMXNAME}: Bytes received per second |<p>Bytes received rate by processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},bytesReceived]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Tomcat |{#JMXNAME}: Bytes sent per second |<p>Bytes sent rate by processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},bytesSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Tomcat |{#JMXNAME}: Errors per second |<p>Error rate of request processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},errorCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Tomcat |{#JMXNAME}: Requests per second |<p>Rate of requests served by request processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},requestCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|Tomcat |{#JMXNAME}: Bytes received per second |<p>Bytes received rate by processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},bytesReceived]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Tomcat |{#JMXNAME}: Bytes sent per second |<p>Bytes sent rate by processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},bytesSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Tomcat |{#JMXNAME}: Errors per second |<p>Error rate of request processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},errorCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Tomcat |{#JMXNAME}: Requests per second |<p>Rate of requests served by request processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},requestCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|Tomcat |{#JMXNAME}: Requests processing time |<p>The total time to process all incoming requests of request processor {#JMXNAME}</p> |JMX |jmx[{#JMXOBJ},processingTime]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p> |
|Tomcat |{#JMXVALUE}: Gzip compression status |<p>Gzip compression status on {#JMXNAME}. Enabling gzip compression may save server bandwidth.</p> |JMX |jmx[{#JMXOBJ},compression]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Tomcat |{#JMXNAME}: Threads count |<p>Amount of threads the thread pool has right now, both busy and free.</p> |JMX |jmx[{#JMXOBJ},currentThreadCount]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
@@ -68,17 +67,17 @@ There are no template links in this template.
|Tomcat |{#JMXNAME}: Threads busy |<p>Number of the requests that are being currently handled.</p> |JMX |jmx[{#JMXOBJ},currentThreadsBusy] |
|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions active |<p>Active sessions of the application.</p> |JMX |jmx[{#JMXOBJ},activeSessions] |
|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions active maximum so far |<p>Maximum number of active sessions so far.</p> |JMX |jmx[{#JMXOBJ},maxActive] |
-|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions created per second |<p>Rate of sessions created by this application per second.</p> |JMX |jmx[{#JMXOBJ},sessionCounter]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions rejected per second |<p>Rate of sessions we rejected due to maxActive being reached.</p> |JMX |jmx[{#JMXOBJ},rejectedSessions]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions created per second |<p>Rate of sessions created by this application per second.</p> |JMX |jmx[{#JMXOBJ},sessionCounter]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions rejected per second |<p>Rate of sessions we rejected due to maxActive being reached.</p> |JMX |jmx[{#JMXOBJ},rejectedSessions]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|Tomcat |{#JMXHOST}{#JMXCONTEXT}: Sessions allowed maximum |<p>The maximum number of active Sessions allowed, or -1 for no limit.</p> |JMX |jmx[{#JMXOBJ},maxActiveSessions] |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Tomcat: Version has been changed |<p>Tomcat version has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["Catalina:type=Server",serverInfo].diff()}=1 and {TEMPLATE_NAME:jmx["Catalina:type=Server",serverInfo].strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|{#JMXVALUE}: Gzip compression is disabled |<p>gzip compression is disabled for connector {#JMXVALUE}.</p> |`{TEMPLATE_NAME:jmx[{#JMXOBJ},compression].str(off)} = 1` |INFO |<p>Manual close: YES</p> |
-|{#JMXNAME}: Busy worker threads count are more than {$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}% of the limit for {$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"} |<p>When current threads busy counter reaches the limit, no more requests could be handled, and the application chokes.</p> |`{TEMPLATE_NAME:jmx[{#JMXOBJ},currentThreadsBusy].min({$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"})}>{Apache Tomcat JMX:jmx[{#JMXOBJ},maxThreads].last()}*{$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}/100` |HIGH | |
+|Tomcat: Version has been changed |<p>Tomcat version has changed. Ack to close.</p> |`last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo],#1)<>last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo],#2) and length(last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo]))>0` |INFO |<p>Manual close: YES</p> |
+|{#JMXVALUE}: Gzip compression is disabled |<p>gzip compression is disabled for connector {#JMXVALUE}.</p> |`find(/Apache Tomcat by JMX/jmx[{#JMXOBJ},compression],,"like","off") = 1` |INFO |<p>Manual close: YES</p> |
+|{#JMXNAME}: Busy worker threads count are more than {$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}% of the limit for {$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"} |<p>When current threads busy counter reaches the limit, no more requests could be handled, and the application chokes.</p> |`min(/Apache Tomcat by JMX/jmx[{#JMXOBJ},currentThreadsBusy],{$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"})>last(/Apache Tomcat by JMX/jmx[{#JMXOBJ},maxThreads])*{$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}/100` |HIGH | |
## Feedback
diff --git a/templates/app/tomcat_jmx/template_app_tomcat_jmx.yaml b/templates/app/tomcat_jmx/template_app_tomcat_jmx.yaml
index ad71b51cc20..d2588553f25 100644
--- a/templates/app/tomcat_jmx/template_app_tomcat_jmx.yaml
+++ b/templates/app/tomcat_jmx/template_app_tomcat_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:20Z'
+ date: '2021-12-19T15:19:52Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -8,8 +8,8 @@ zabbix_export:
templates:
-
uuid: 3cc8c9ae7055458c9a803597007f70bd
- template: 'Apache Tomcat JMX'
- name: 'Apache Tomcat JMX'
+ template: 'Apache Tomcat by JMX'
+ name: 'Apache Tomcat by JMX'
description: |
The template to monitor Apache Tomcat by Zabbix that work without any external scripts.
The metrics are collected by JMX.
@@ -17,7 +17,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/411862-discussion-thread-for-official-zabbix-template-tomcat
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -46,7 +46,7 @@ zabbix_export:
triggers:
-
uuid: 152d235652914aa2af78334385334214
- expression: 'last(/Apache Tomcat JMX/jmx["Catalina:type=Server",serverInfo],#1)<>last(/Apache Tomcat JMX/jmx["Catalina:type=Server",serverInfo],#2) and length(last(/Apache Tomcat JMX/jmx["Catalina:type=Server",serverInfo]))>0'
+ expression: 'last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo],#1)<>last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo],#2) and length(last(/Apache Tomcat by JMX/jmx["Catalina:type=Server",serverInfo]))>0'
name: 'Tomcat: Version has been changed'
priority: INFO
description: 'Tomcat version has changed. Ack to close.'
@@ -93,7 +93,7 @@ zabbix_export:
trigger_prototypes:
-
uuid: 7d707de73a164f1db6b69e4dcb8c6ae9
- expression: 'find(/Apache Tomcat JMX/jmx[{#JMXOBJ},compression],,"like","off") = 1'
+ expression: 'find(/Apache Tomcat by JMX/jmx[{#JMXOBJ},compression],,"like","off") = 1'
name: '{#JMXVALUE}: Gzip compression is disabled'
priority: INFO
description: 'gzip compression is disabled for connector {#JMXVALUE}.'
@@ -228,14 +228,14 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},bytesSent]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},bytesReceived]'
jmx_endpoint: 'service:jmx:rmi:///jndi/rmi://{HOST.CONN}:{HOST.PORT}/jmxrmi'
-
@@ -350,28 +350,28 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},maxActive]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},activeSessions]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},rejectedSessions]'
-
sortorder: '3'
drawtype: GRADIENT_LINE
color: A54F10
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},maxActiveSessions]'
jmx_endpoint: 'service:jmx:rmi:///jndi/rmi://{HOST.CONN}:{HOST.PORT}/jmxrmi'
-
@@ -451,7 +451,7 @@ zabbix_export:
trigger_prototypes:
-
uuid: 7a15296553a447f6aebf4958631b6704
- expression: 'min(/Apache Tomcat JMX/jmx[{#JMXOBJ},currentThreadsBusy],{$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"})>last(/Apache Tomcat JMX/jmx[{#JMXOBJ},maxThreads])*{$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}/100'
+ expression: 'min(/Apache Tomcat by JMX/jmx[{#JMXOBJ},currentThreadsBusy],{$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"})>last(/Apache Tomcat by JMX/jmx[{#JMXOBJ},maxThreads])*{$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}/100'
name: '{#JMXNAME}: Busy worker threads count are more than {$TOMCAT.THREADS.MAX.PCT:"{#JMXNAME}"}% of the limit for {$TOMCAT.THREADS.MAX.TIME:"{#JMXNAME}"}'
priority: HIGH
description: 'When current threads busy counter reaches the limit, no more requests could be handled, and the application chokes.'
@@ -464,21 +464,21 @@ zabbix_export:
drawtype: GRADIENT_LINE
color: 1A7C11
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},maxThreads]'
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: 2774A4
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},currentThreadCount]'
-
sortorder: '2'
drawtype: GRADIENT_LINE
color: F63100
item:
- host: 'Apache Tomcat JMX'
+ host: 'Apache Tomcat by JMX'
key: 'jmx[{#JMXOBJ},currentThreadsBusy]'
jmx_endpoint: 'service:jmx:rmi:///jndi/rmi://{HOST.CONN}:{HOST.PORT}/jmxrmi'
macros:
diff --git a/templates/app/travis_http/README.md b/templates/app/travis_http/README.md
index 6a48aae717e..b65f14486c4 100644
--- a/templates/app/travis_http/README.md
+++ b/templates/app/travis_http/README.md
@@ -50,7 +50,7 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|Travis |Travis: Get health |<p>Getting home JSON using Travis API.</p> |HTTP_AGENT |travis.get_health<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED<p>- JAVASCRIPT: `return JSON.parse(value).config ? 1 : 0`</p> |
+|Travis |Travis: Get health |<p>Getting home JSON using Travis API.</p> |HTTP_AGENT |travis.get_health<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- JAVASCRIPT: `return JSON.parse(value).config ? 1 : 0`</p> |
|Travis |Travis: Jobs passed |<p>Total count of passed jobs in all repos.</p> |DEPENDENT |travis.jobs.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs.length()`</p> |
|Travis |Travis: Jobs active |<p>Active jobs in all repos.</p> |DEPENDENT |travis.jobs.active<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs[?(@.state == "started")].length()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
|Travis |Travis: Jobs in queue |<p>Jobs in queue in all repos.</p> |DEPENDENT |travis.jobs.queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.jobs[?(@.state == "received")].length()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
@@ -61,7 +61,7 @@ There are no template links in this template.
|Travis |Travis: Repo [{#SLUG}]: Builds passed |<p>Count of all passed builds in {#SLUG} repo.</p> |DEPENDENT |travis.repo.builds.passed[{#SLUG}]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return JSON.parse(value).builds.filter(function (e){return e.state == "passed"}).length`</p> |
|Travis |Travis: Repo [{#SLUG}]: Builds failed |<p>Count of all failed builds in {#SLUG} repo.</p> |DEPENDENT |travis.repo.builds.failed[{#SLUG}]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
|Travis |Travis: Repo [{#SLUG}]: Builds total |<p>Count of total builds in {#SLUG} repo.</p> |DEPENDENT |travis.repo.builds.total[{#SLUG}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.builds.length()`</p> |
-|Travis |Travis: Repo [{#SLUG}]: Builds passed, % |<p>Percent of passed builds in {#SLUG} repo.</p> |CALCULATED |travis.repo.builds.passed.pct[{#SLUG}]<p>**Expression**:</p>`last(travis.repo.builds.passed[{#SLUG}])/last(travis.repo.builds.total[{#SLUG}])*100` |
+|Travis |Travis: Repo [{#SLUG}]: Builds passed, % |<p>Percent of passed builds in {#SLUG} repo.</p> |CALCULATED |travis.repo.builds.passed.pct[{#SLUG}]<p>**Expression**:</p>`last(//travis.repo.builds.passed[{#SLUG}])/last(//travis.repo.builds.total[{#SLUG}])*100` |
|Travis |Travis: Repo [{#SLUG}]: Description |<p>Description of Travis repo (git project description).</p> |DEPENDENT |travis.repo.description[{#SLUG}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.repositories[?(@.slug == "{#SLUG}")].description.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Travis |Travis: Repo [{#SLUG}]: Last build duration |<p>Last build duration in {#SLUG} repo.</p> |DEPENDENT |travis.repo.last_build.duration[{#SLUG}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.builds[0].duration`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Travis |Travis: Repo [{#SLUG}]: Last build state |<p>Last build state in {#SLUG} repo.</p> |DEPENDENT |travis.repo.last_build.state[{#SLUG}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.builds[0].state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
@@ -77,10 +77,10 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Travis: Service is unavailable |<p>Travis API is unavailable. Please check if the correct macros are set.</p> |`{TEMPLATE_NAME:travis.get_health.last()}=0` |HIGH |<p>Manual close: YES</p> |
-|Travis: Failed to fetch home page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`{TEMPLATE_NAME:travis.get_health.nodata(30m)}=1` |WARNING |<p>Manual close: YES</p> |
-|Travis: Repo [{#SLUG}]: Percent of successful builds are < {$TRAVIS.BUILDS.SUCCESS.PERCENT}% |<p>Low successful builds rate.</p> |`{TEMPLATE_NAME:travis.repo.builds.passed.pct[{#SLUG}].last()}<{$TRAVIS.BUILDS.SUCCESS.PERCENT}` |WARNING |<p>Manual close: YES</p> |
-|Travis: Repo [{#SLUG}]: Last build status is 'errored' |<p>Last build status is errored.</p> |`{TEMPLATE_NAME:travis.repo.last_build.state[{#SLUG}].str(errored)}=1` |WARNING |<p>Manual close: YES</p> |
+|Travis: Service is unavailable |<p>Travis API is unavailable. Please check if the correct macros are set.</p> |`last(/Travis CI by HTTP/travis.get_health)=0` |HIGH |<p>Manual close: YES</p> |
+|Travis: Failed to fetch home page (or no data for 30m) |<p>Zabbix has not received data for items for the last 30 minutes.</p> |`nodata(/Travis CI by HTTP/travis.get_health,30m)=1` |WARNING |<p>Manual close: YES</p> |
+|Travis: Repo [{#SLUG}]: Percent of successful builds are < {$TRAVIS.BUILDS.SUCCESS.PERCENT}% |<p>Low successful builds rate.</p> |`last(/Travis CI by HTTP/travis.repo.builds.passed.pct[{#SLUG}])<{$TRAVIS.BUILDS.SUCCESS.PERCENT}` |WARNING |<p>Manual close: YES</p> |
+|Travis: Repo [{#SLUG}]: Last build status is 'errored' |<p>Last build status is errored.</p> |`find(/Travis CI by HTTP/travis.repo.last_build.state[{#SLUG}],,"like","errored")=1` |WARNING |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/travis_http/template_app_travis_ci_http.yaml b/templates/app/travis_http/template_app_travis_ci_http.yaml
index 4a977a3c060..090f8f40e83 100644
--- a/templates/app/travis_http/template_app_travis_ci_http.yaml
+++ b/templates/app/travis_http/template_app_travis_ci_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-08-18T12:34:02Z'
+ date: '2021-12-19T15:19:53Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -14,13 +14,13 @@ zabbix_export:
Template for monitoring Travis CI https://travis-ci.com
You must set {$TRAVIS.API.TOKEN} and {$TRAVIS.API.URL} macros.
{$TRAVIS.API.TOKEN} is a Travis API authentication token located in User -> Settings -> API authentication.
- {$TRAVIS.API.URL} could be in 3 different variations:
+ {$TRAVIS.API.URL} could be in 2 different variations:
- for a private project : api.travis-ci.com
- for an enterprise projects: api.example.com (where you replace example.com with the domain Travis CI is running on)
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -79,7 +79,6 @@ zabbix_export:
-
name: limit
value: '0'
- status_codes: ''
headers:
-
name: Travis-API-Version
@@ -148,7 +147,6 @@ zabbix_export:
value_type: TEXT
description: 'Getting jobs using Travis API.'
url: 'https://{$TRAVIS.API.URL}/jobs'
- status_codes: ''
headers:
-
name: Travis-API-Version
@@ -174,7 +172,6 @@ zabbix_export:
-
name: limit
value: '0'
- status_codes: ''
headers:
-
name: Travis-API-Version
@@ -414,7 +411,6 @@ zabbix_export:
-
name: limit
value: '50'
- status_codes: ''
headers:
-
name: Travis-API-Version
@@ -440,7 +436,6 @@ zabbix_export:
-
name: limit
value: '50'
- status_codes: ''
headers:
-
name: Travis-API-Version
@@ -588,7 +583,7 @@ zabbix_export:
var result = [];
JSON.parse(value).repositories.forEach(function (e) {
- result.push({ '{#ID}': e.id, '{#SLUG}': e.slug });
+ result.push({ '{#ID}': e.id, '{#SLUG}': e.slug });
});
return JSON.stringify(result)
diff --git a/templates/app/vault_http/README.md b/templates/app/vault_http/README.md
index 82cd5b3733d..5d836d7ed78 100644
--- a/templates/app/vault_http/README.md
+++ b/templates/app/vault_http/README.md
@@ -7,7 +7,7 @@ For Zabbix version: 5.4 and higher
The template to monitor HashiCorp Vault by Zabbix that work without any external scripts.
Most of the metrics are collected in one go, thanks to Zabbix bulk data collection.
-Template `Vault by HTTP` — collects metrics by HTTP agent from `/sys/metrics` API endpoint.
+Template `Vault by HTTP` — collects metrics by HTTP agent from `/sys/metrics` API endpoint.
See https://www.vaultproject.io/api-docs/system/metrics.
@@ -63,105 +63,105 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|Vault |Vault: Initialized |<p>Initialization status.</p> |DEPENDENT |vault.health.initialized<p>**Preprocessing**:</p><p>- JSONPATH: `$.initialized`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Sealed |<p>Seal status.</p> |DEPENDENT |vault.health.sealed<p>**Preprocessing**:</p><p>- JSONPATH: `$.sealed`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Standby |<p>Standby status.</p> |DEPENDENT |vault.health.standby<p>**Preprocessing**:</p><p>- JSONPATH: `$.standby`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Performance standby |<p>Performance standby status.</p> |DEPENDENT |vault.health.performance_standby<p>**Preprocessing**:</p><p>- JSONPATH: `$.performance_standby`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Initialized |<p>Initialization status.</p> |DEPENDENT |vault.health.initialized<p>**Preprocessing**:</p><p>- JSONPATH: `$.initialized`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Sealed |<p>Seal status.</p> |DEPENDENT |vault.health.sealed<p>**Preprocessing**:</p><p>- JSONPATH: `$.sealed`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Standby |<p>Standby status.</p> |DEPENDENT |vault.health.standby<p>**Preprocessing**:</p><p>- JSONPATH: `$.standby`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Performance standby |<p>Performance standby status.</p> |DEPENDENT |vault.health.performance_standby<p>**Preprocessing**:</p><p>- JSONPATH: `$.performance_standby`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Performance replication |<p>Performance replication mode</p><p>https://www.vaultproject.io/docs/enterprise/replication</p> |DEPENDENT |vault.health.replication_performance_mode<p>**Preprocessing**:</p><p>- JSONPATH: `$.replication_performance_mode`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Disaster Recovery replication |<p>Disaster recovery replication mode</p><p>https://www.vaultproject.io/docs/enterprise/replication</p> |DEPENDENT |vault.health.replication_dr_mode<p>**Preprocessing**:</p><p>- JSONPATH: `$.replication_dr_mode`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Version |<p>Server version.</p> |DEPENDENT |vault.health.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.version`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Healthcheck |<p>Vault healthcheck.</p> |DEPENDENT |vault.health.check<p>**Preprocessing**:</p><p>- JSONPATH: `$.healthcheck`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: HA enabled |<p>HA enabled status.</p> |DEPENDENT |vault.leader.ha_enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.ha_enabled`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Is leader |<p>Leader status.</p> |DEPENDENT |vault.leader.is_self<p>**Preprocessing**:</p><p>- JSONPATH: `$.is_self`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: HA enabled |<p>HA enabled status.</p> |DEPENDENT |vault.leader.ha_enabled<p>**Preprocessing**:</p><p>- JSONPATH: `$.ha_enabled`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Is leader |<p>Leader status.</p> |DEPENDENT |vault.leader.is_self<p>**Preprocessing**:</p><p>- JSONPATH: `$.is_self`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Get metrics error |<p>Get metrics error.</p> |DEPENDENT |vault.get_metrics.error<p>**Preprocessing**:</p><p>- JSONPATH: `$.errors[0]`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Process CPU seconds, total |<p>Total user and system CPU time spent in seconds.</p> |DEPENDENT |vault.metrics.process.cpu.seconds.total<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_cpu_seconds_total `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Open file descriptors, max |<p>Maximum number of open file descriptors.</p> |DEPENDENT |vault.metrics.process.max.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_max_fds `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Open file descriptors, current |<p>Number of open file descriptors.</p> |DEPENDENT |vault.metrics.process.open.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_open_fds `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Process resident memory |<p>Resident memory size in bytes.</p> |DEPENDENT |vault.metrics.process.resident_memory.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_resident_memory_bytes `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Uptime |<p>Server uptime.</p> |DEPENDENT |vault.metrics.process.uptime<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_start_time_seconds `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- JAVASCRIPT: `return Math.floor(Date.now()/1000 - Number(value));`</p> |
-|Vault |Vault: Process virtual memory, current |<p>Virtual memory size in bytes.</p> |DEPENDENT |vault.metrics.process.virtual_memory.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_bytes `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Process virtual memory, max |<p>Maximum amount of virtual memory available in bytes.</p> |DEPENDENT |vault.metrics.process.virtual_memory.max.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_max_bytes `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Audit log requests, rate |<p>Number of all audit log requests across all audit log devices.</p> |DEPENDENT |vault.metrics.audit.log.request.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_request_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Audit log request failures, rate |<p>Number of audit log request failures.</p> |DEPENDENT |vault.metrics.audit.log.request.failure.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_request_failure `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Audit log response, rate |<p>Number of audit log responses across all audit log devices.</p> |DEPENDENT |vault.metrics.audit.log.response.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_response_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Audit log response failures, rate |<p>Number of audit log response failures.</p> |DEPENDENT |vault.metrics.audit.log.response.failure.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_response_failure `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Barrier DELETE ops, rate |<p>Number of DELETE operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.delete.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_delete_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Barrier GET ops, rate |<p>Number of GET operations at the barrier.</p> |DEPENDENT |vault.metrics.vault.barrier.get.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_get_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Barrier LIST ops, rate |<p>Number of LIST operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.list.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_list_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Barrier PUT ops, rate |<p>Number of PUT operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.put.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_put_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Cache hit, rate |<p>Number of times a value was retrieved from the LRU cache.</p> |DEPENDENT |vault.metrics.cache.hit.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_hit `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Cache miss, rate |<p>Number of times a value was not in the LRU cache. The results in a read from the configured storage.</p> |DEPENDENT |vault.metrics.cache.miss.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_miss `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Cache write, rate |<p>Number of times a value was written to the LRU cache.</p> |DEPENDENT |vault.metrics.cache.write.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_write `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Check token, rate |<p>Number of token checks handled by Vault corecore.</p> |DEPENDENT |vault.metrics.core.check.token.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_check_token_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Fetch ACL and token, rate |<p>Number of ACL and corresponding token entry fetches handled by Vault core.</p> |DEPENDENT |vault.metrics.core.fetch.acl_and_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_fetch_acl_and_token_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Requests, rate |<p>Number of requests handled by Vault core.</p> |DEPENDENT |vault.metrics.core.handle.request<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_handle_request_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
+|Vault |Vault: Process CPU seconds, total |<p>Total user and system CPU time spent in seconds.</p> |DEPENDENT |vault.metrics.process.cpu.seconds.total<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_cpu_seconds_total`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Open file descriptors, max |<p>Maximum number of open file descriptors.</p> |DEPENDENT |vault.metrics.process.max.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_max_fds`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Open file descriptors, current |<p>Number of open file descriptors.</p> |DEPENDENT |vault.metrics.process.open.fds<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_open_fds`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Process resident memory |<p>Resident memory size in bytes.</p> |DEPENDENT |vault.metrics.process.resident_memory.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_resident_memory_bytes`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Uptime |<p>Server uptime.</p> |DEPENDENT |vault.metrics.process.uptime<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_start_time_seconds`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- JAVASCRIPT: `return Math.floor(Date.now()/1000 - Number(value));`</p> |
+|Vault |Vault: Process virtual memory, current |<p>Virtual memory size in bytes.</p> |DEPENDENT |vault.metrics.process.virtual_memory.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_bytes`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Process virtual memory, max |<p>Maximum amount of virtual memory available in bytes.</p> |DEPENDENT |vault.metrics.process.virtual_memory.max.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `process_virtual_memory_max_bytes`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Audit log requests, rate |<p>Number of all audit log requests across all audit log devices.</p> |DEPENDENT |vault.metrics.audit.log.request.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_request_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Audit log request failures, rate |<p>Number of audit log request failures.</p> |DEPENDENT |vault.metrics.audit.log.request.failure.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_request_failure`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Audit log response, rate |<p>Number of audit log responses across all audit log devices.</p> |DEPENDENT |vault.metrics.audit.log.response.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_response_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Audit log response failures, rate |<p>Number of audit log response failures.</p> |DEPENDENT |vault.metrics.audit.log.response.failure.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_audit_log_response_failure`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Barrier DELETE ops, rate |<p>Number of DELETE operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.delete.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_delete_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Barrier GET ops, rate |<p>Number of GET operations at the barrier.</p> |DEPENDENT |vault.metrics.vault.barrier.get.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_get_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Barrier LIST ops, rate |<p>Number of LIST operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.list.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_list_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Barrier PUT ops, rate |<p>Number of PUT operations at the barrier.</p> |DEPENDENT |vault.metrics.barrier.put.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_barrier_put_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Cache hit, rate |<p>Number of times a value was retrieved from the LRU cache.</p> |DEPENDENT |vault.metrics.cache.hit.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_hit`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Cache miss, rate |<p>Number of times a value was not in the LRU cache. This results in a read from the configured storage.</p> |DEPENDENT |vault.metrics.cache.miss.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_miss`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Cache write, rate |<p>Number of times a value was written to the LRU cache.</p> |DEPENDENT |vault.metrics.cache.write.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_cache_write`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Check token, rate |<p>Number of token checks handled by Vault core.</p> |DEPENDENT |vault.metrics.core.check.token.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_check_token_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Fetch ACL and token, rate |<p>Number of ACL and corresponding token entry fetches handled by Vault core.</p> |DEPENDENT |vault.metrics.core.fetch.acl_and_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_fetch_acl_and_token_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Requests, rate |<p>Number of requests handled by Vault core.</p> |DEPENDENT |vault.metrics.core.handle.request<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_handle_request_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
|Vault |Vault: Leadership setup failed, counter |<p>Cluster leadership setup failures which have occurred in a highly available Vault cluster.</p> |DEPENDENT |vault.metrics.core.leadership.setup_failed<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_core_leadership_setup_failed`</p><p>- JSONPATH: `$[?(@.name=="vault_core_leadership_setup_failed")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
|Vault |Vault: Leadership setup lost, counter |<p>Cluster leadership losses which have occurred in a highly available Vault cluster.</p> |DEPENDENT |vault.metrics.core.leadership_lost<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_core_leadership_lost_count`</p><p>- JSONPATH: `$[?(@.name=="vault_core_leadership_lost_count")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|Vault |Vault: Post-unseal ops, counter |<p>Duration of time taken by post-unseal operations handled by Vault core.</p> |DEPENDENT |vault.metrics.core.post_unseal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_post_unseal_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Pre-seal ops, counter |<p>Duration of time taken by pre-seal operations.</p> |DEPENDENT |vault.metrics.core.pre_seal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_pre_seal_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Requested seal ops, counter |<p>Duration of time taken by requested seal operations.</p> |DEPENDENT |vault.metrics.core.seal_with_request<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_with_request_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Seal ops, counter |<p>Duration of time taken by seal operations.</p> |DEPENDENT |vault.metrics.core.seal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Internal seal ops, counter |<p>Duration of time taken by internal seal operations.</p> |DEPENDENT |vault.metrics.core.seal_internal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_internal_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Post-unseal ops, counter |<p>Duration of time taken by post-unseal operations handled by Vault core.</p> |DEPENDENT |vault.metrics.core.post_unseal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_post_unseal_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Pre-seal ops, counter |<p>Duration of time taken by pre-seal operations.</p> |DEPENDENT |vault.metrics.core.pre_seal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_pre_seal_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Requested seal ops, counter |<p>Duration of time taken by requested seal operations.</p> |DEPENDENT |vault.metrics.core.seal_with_request<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_with_request_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Seal ops, counter |<p>Duration of time taken by seal operations.</p> |DEPENDENT |vault.metrics.core.seal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Internal seal ops, counter |<p>Duration of time taken by internal seal operations.</p> |DEPENDENT |vault.metrics.core.seal_internal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_seal_internal_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Vault |Vault: Leadership step downs, counter |<p>Cluster leadership step down.</p> |DEPENDENT |vault.metrics.core.step_down<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_core_step_down_count`</p><p>- JSONPATH: `$[?(@.name=="vault_core_step_down_count")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|Vault |Vault: Unseal ops, counter |<p>Duration of time taken by unseal operations.</p> |DEPENDENT |vault.metrics.core.unseal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_unseal_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Fetch lease times, counter |<p>Time taken to fetch lease times.</p> |DEPENDENT |vault.metrics.expire.fetch.lease.times<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_fetch_lease_times_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Fetch lease times by token, counter |<p>Time taken to fetch lease times by token.</p> |DEPENDENT |vault.metrics.expire.fetch.lease.times.by_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_fetch_lease_times_by_token_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Number of expiring leases |<p>Number of all leases which are eligible for eventual expiry.</p> |DEPENDENT |vault.metrics.expire.num_leases<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_num_leases `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Expire revoke, count |<p>Time taken to revoke a token.</p> |DEPENDENT |vault.metrics.expire.revoke<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Expire revoke force, count |<p>Time taken to forcibly revoke a token.</p> |DEPENDENT |vault.metrics.expire.revoke.force<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_force_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Expire revoke prefix, count |<p>Tokens revoke on a prefix.</p> |DEPENDENT |vault.metrics.expire.revoke.prefix<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_prefix_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Revoke secrets by token, count |<p>Time taken to revoke all secrets issued with a given token.</p> |DEPENDENT |vault.metrics.expire.revoke.by_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_by_token_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Expire renew, count |<p>Time taken to renew a lease.</p> |DEPENDENT |vault.metrics.expire.renew<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_renew_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Renew token, count |<p>Time taken to renew a token which does not need to invoke a logical backend.</p> |DEPENDENT |vault.metrics.expire.renew_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_renew_token_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Register ops, count |<p>Time taken for register operations.</p> |DEPENDENT |vault.metrics.expire.register<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_register_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Register auth ops, count |<p>Time taken for register authentication operations which create lease entries without lease ID.</p> |DEPENDENT |vault.metrics.expire.register.auth<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_register_auth_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Policy GET ops, rate |<p>Number of operations to get a policy.</p> |DEPENDENT |vault.metrics.policy.get_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_get_policy_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Policy LIST ops, rate |<p>Number of operations to list policies.</p> |DEPENDENT |vault.metrics.policy.list_policies.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_list_policies_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Policy DELETE ops, rate |<p>Number of operations to delete a policy.</p> |DEPENDENT |vault.metrics.policy.delete_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_delete_policy_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Policy SET ops, rate |<p>Number of operations to set a policy.</p> |DEPENDENT |vault.metrics.policy.set_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_set_policy_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Token create, count |<p>The time taken to create a token.</p> |DEPENDENT |vault.metrics.token.create<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_create_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Token createAccessor, count |<p>The time taken to create a token accessor.</p> |DEPENDENT |vault.metrics.token.createAccessor<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_createAccessor_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Token lookup, rate |<p>Number of token look up.</p> |DEPENDENT |vault.metrics.token.lookup.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_lookup_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Token revoke, count |<p>The time taken to look up a token.</p> |DEPENDENT |vault.metrics.token.revoke<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_revoke_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Token revoke tree, count |<p>Time taken to revoke a token tree.</p> |DEPENDENT |vault.metrics.token.revoke.tree<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_revoke_tree_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Token store, count |<p>Time taken to store an updated token entry without writing to the secondary index.</p> |DEPENDENT |vault.metrics.token.store<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_store_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime allocated bytes |<p>Number of bytes allocated by the Vault process. This could burst from time to time, but should return to a steady state value.</p> |DEPENDENT |vault.metrics.runtime.alloc.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_alloc_bytes `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime freed objects |<p>Number of freed objects.</p> |DEPENDENT |vault.metrics.runtime.free.count<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_free_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime heap objects |<p>Number of objects on the heap. This is a good general memory pressure indicator worth establishing a baseline and thresholds for alerting.</p> |DEPENDENT |vault.metrics.runtime.heap.objects<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_heap_objects `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime malloc count |<p>Cumulative count of allocated heap objects.</p> |DEPENDENT |vault.metrics.runtime.malloc.count<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_malloc_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime num goroutines |<p>Number of goroutines. This serves as a general system load indicator worth establishing a baseline and thresholds for alerting.</p> |DEPENDENT |vault.metrics.runtime.num_goroutines<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_num_goroutines `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime sys bytes |<p>Number of bytes allocated to Vault. This includes what is being used by Vault's heap and what has been reclaimed but not given back to the operating system.</p> |DEPENDENT |vault.metrics.runtime.sys.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_sys_bytes `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Runtime GC pause, total |<p>The total garbage collector pause time since Vault was last started.</p> |DEPENDENT |vault.metrics.total.gc.pause<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_total_gc_pause_ns `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `1.0E-9`</p> |
-|Vault |Vault: Runtime GC runs, total |<p>Total number of garbage collection runs since Vault was last started.</p> |DEPENDENT |vault.metrics.runtime.total.gc.runs<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_total_gc_runs `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Unseal ops, counter |<p>Duration of time taken by unseal operations.</p> |DEPENDENT |vault.metrics.core.unseal<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_core_unseal_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Fetch lease times, counter |<p>Time taken to fetch lease times.</p> |DEPENDENT |vault.metrics.expire.fetch.lease.times<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_fetch_lease_times_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Fetch lease times by token, counter |<p>Time taken to fetch lease times by token.</p> |DEPENDENT |vault.metrics.expire.fetch.lease.times.by_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_fetch_lease_times_by_token_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Number of expiring leases |<p>Number of all leases which are eligible for eventual expiry.</p> |DEPENDENT |vault.metrics.expire.num_leases<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_num_leases`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Expire revoke, count |<p>Time taken to revoke a token.</p> |DEPENDENT |vault.metrics.expire.revoke<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Expire revoke force, count |<p>Time taken to forcibly revoke a token.</p> |DEPENDENT |vault.metrics.expire.revoke.force<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_force_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Expire revoke prefix, count |<p>Tokens revoke on a prefix.</p> |DEPENDENT |vault.metrics.expire.revoke.prefix<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_prefix_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Revoke secrets by token, count |<p>Time taken to revoke all secrets issued with a given token.</p> |DEPENDENT |vault.metrics.expire.revoke.by_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_revoke_by_token_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Expire renew, count |<p>Time taken to renew a lease.</p> |DEPENDENT |vault.metrics.expire.renew<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_renew_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Renew token, count |<p>Time taken to renew a token which does not need to invoke a logical backend.</p> |DEPENDENT |vault.metrics.expire.renew_token<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_renew_token_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Register ops, count |<p>Time taken for register operations.</p> |DEPENDENT |vault.metrics.expire.register<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_register_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Register auth ops, count |<p>Time taken for register authentication operations which create lease entries without lease ID.</p> |DEPENDENT |vault.metrics.expire.register.auth<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_expire_register_auth_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Policy GET ops, rate |<p>Number of operations to get a policy.</p> |DEPENDENT |vault.metrics.policy.get_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_get_policy_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Policy LIST ops, rate |<p>Number of operations to list policies.</p> |DEPENDENT |vault.metrics.policy.list_policies.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_list_policies_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Policy DELETE ops, rate |<p>Number of operations to delete a policy.</p> |DEPENDENT |vault.metrics.policy.delete_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_delete_policy_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Policy SET ops, rate |<p>Number of operations to set a policy.</p> |DEPENDENT |vault.metrics.policy.set_policy.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_policy_set_policy_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Token create, count |<p>The time taken to create a token.</p> |DEPENDENT |vault.metrics.token.create<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_create_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Token createAccessor, count |<p>The time taken to create a token accessor.</p> |DEPENDENT |vault.metrics.token.createAccessor<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_createAccessor_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Token lookup, rate |<p>Number of token lookups.</p> |DEPENDENT |vault.metrics.token.lookup.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_lookup_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Token revoke, count |<p>The time taken to revoke a token.</p> |DEPENDENT |vault.metrics.token.revoke<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_revoke_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Token revoke tree, count |<p>Time taken to revoke a token tree.</p> |DEPENDENT |vault.metrics.token.revoke.tree<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_revoke_tree_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Token store, count |<p>Time taken to store an updated token entry without writing to the secondary index.</p> |DEPENDENT |vault.metrics.token.store<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_token_store_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime allocated bytes |<p>Number of bytes allocated by the Vault process. This could burst from time to time, but should return to a steady state value.</p> |DEPENDENT |vault.metrics.runtime.alloc.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_alloc_bytes`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime freed objects |<p>Number of freed objects.</p> |DEPENDENT |vault.metrics.runtime.free.count<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_free_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime heap objects |<p>Number of objects on the heap. This is a good general memory pressure indicator worth establishing a baseline and thresholds for alerting.</p> |DEPENDENT |vault.metrics.runtime.heap.objects<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_heap_objects`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime malloc count |<p>Cumulative count of allocated heap objects.</p> |DEPENDENT |vault.metrics.runtime.malloc.count<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_malloc_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime num goroutines |<p>Number of goroutines. This serves as a general system load indicator worth establishing a baseline and thresholds for alerting.</p> |DEPENDENT |vault.metrics.runtime.num_goroutines<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_num_goroutines`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime sys bytes |<p>Number of bytes allocated to Vault. This includes what is being used by Vault's heap and what has been reclaimed but not given back to the operating system.</p> |DEPENDENT |vault.metrics.runtime.sys.bytes<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_sys_bytes`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Runtime GC pause, total |<p>The total garbage collector pause time since Vault was last started.</p> |DEPENDENT |vault.metrics.total.gc.pause<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_total_gc_pause_ns`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `1.0E-9`</p> |
+|Vault |Vault: Runtime GC runs, total |<p>Total number of garbage collection runs since Vault was last started.</p> |DEPENDENT |vault.metrics.runtime.total.gc.runs<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_runtime_total_gc_runs`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Vault |Vault: Token count, total |<p>Total number of service tokens available for use; counts all un-expired and un-revoked tokens in Vault's token store. This measurement is performed every 10 minutes.</p> |DEPENDENT |vault.metrics.token<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_count`</p><p>- JSONPATH: `$[?(@.name=="vault_token_count")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
|Vault |Vault: Token count by auth, total |<p>Total number of service tokens that were created by an auth method.</p> |DEPENDENT |vault.metrics.token.by_auth<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_count_by_auth`</p><p>- JSONPATH: `$[?(@.name=="vault_token_count_by_auth")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
|Vault |Vault: Token count by policy, total |<p>Total number of service tokens that have a policy attached.</p> |DEPENDENT |vault.metrics.token.by_policy<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_count_by_policy`</p><p>- JSONPATH: `$[?(@.name=="vault_token_count_by_policy")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
|Vault |Vault: Token count by ttl, total |<p>Number of service tokens, grouped by the TTL range they were assigned at creation.</p> |DEPENDENT |vault.metrics.token.by_ttl<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_count_by_ttl`</p><p>- JSONPATH: `$[?(@.name=="vault_token_count_by_ttl")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|Vault |Vault: Token creation, rate |<p>Number of service or batch tokens created.</p> |DEPENDENT |vault.metrics.token.creation.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_creation`</p><p>- JSONPATH: `$[?(@.name=="vault_token_creation")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
+|Vault |Vault: Token creation, rate |<p>Number of service or batch tokens created.</p> |DEPENDENT |vault.metrics.token.creation.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_token_creation`</p><p>- JSONPATH: `$[?(@.name=="vault_token_creation")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
|Vault |Vault: Secret kv entries |<p>Number of entries in each key-value secret engine.</p> |DEPENDENT |vault.metrics.secret.kv.count<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_secret_kv_count`</p><p>- JSONPATH: `$[?(@.name=="vault_secret_kv_count")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p> |
-|Vault |Vault: Token secret lease creation, rate |<p>Counts the number of leases created by secret engines.</p> |DEPENDENT |vault.metrics.secret.lease.creation.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_secret_lease_creation`</p><p>- JSONPATH: `$[?(@.name=="vault_secret_lease_creation")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Storage [{#STORAGE}] {#OPERATION} ops, rate |<p>Number of a {#OPERATION} operation against the {#STORAGE} storage backend.</p> |DEPENDENT |vault.metrics.storage.rate[{#STORAGE}, {#OPERATION}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C} `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Rollback attempt [{#MOUNTPOINT}] ops, rate |<p>Number of operations to perform a rollback operation on the given mount point.</p> |DEPENDENT |vault.metrics.rollback.attempt.rate[{#MOUNTPOINT}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C} `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Route rollback [{#MOUNTPOINT}] ops, rate |<p>Number of operations to dispatch a rollback operation to a backend, and for that backend to process it. Rollback operations are automatically scheduled to clean up partial errors.</p> |DEPENDENT |vault.metrics.route.rollback.rate[{#MOUNTPOINT}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C} `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND |
-|Vault |Vault: Delete WALs, count{#SINGLETON} |<p>Time taken to delete a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.deletewals[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_deletewals_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: GC deleted WAL{#SINGLETON} |<p>Number of Write Ahead Logs (WAL) deleted during each garbage collection run.</p> |DEPENDENT |vault.metrics.wal.gc.deleted[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_gc_deleted `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: WALs on disk, total{#SINGLETON} |<p>Total Number of Write Ahead Logs (WAL) on disk.</p> |DEPENDENT |vault.metrics.wal.gc.total[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_gc_total `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Load WALs, count{#SINGLETON} |<p>Time taken to load a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.loadWAL[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_loadWAL_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Persist WALs, count{#SINGLETON} |<p>Time taken to persist a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.persistwals[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_persistwals_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Flush ready WAL, count{#SINGLETON} |<p>Time taken to flush a ready Write Ahead Log (WAL) to storage.</p> |DEPENDENT |vault.metrics.wal.flushready[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_flushready_count `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Stream WAL missing guard, count{#SINGLETON} |<p>Number of incidences where the starting Merkle Tree index used to begin streaming WAL entries is not matched/found.</p> |DEPENDENT |vault.metrics.logshipper.streamWALs.missing_guard[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `logshipper_streamWALs_missing_guard `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Stream WAL guard found, count{#SINGLETON} |<p>Number of incidences where the starting Merkle Tree index used to begin streaming WAL entries is matched/found.</p> |DEPENDENT |vault.metrics.logshipper.streamWALs.guard_found[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `logshipper_streamWALs_guard_found `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Merkle commit index{#SINGLETON} |<p>The last committed index in the Merkle Tree.</p> |DEPENDENT |vault.metrics.replication.merkle.commit_index[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_merkle_commit_index `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Last WAL{#SINGLETON} |<p>The index of the last WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_wal `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Last DR WAL{#SINGLETON} |<p>The index of the last DR WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_dr_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_dr_wal `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Last performance WAL{#SINGLETON} |<p>The index of the last Performance WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_performance_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_performance_wal `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
-|Vault |Vault: Last remote WAL{#SINGLETON} |<p>The index of the last remote WAL.</p> |DEPENDENT |vault.metrics.replication.fsm.last_remote_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_fsm_last_remote_wal `</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Token secret lease creation, rate |<p>Counts the number of leases created by secret engines.</p> |DEPENDENT |vault.metrics.secret.lease.creation.rate<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `vault_secret_lease_creation`</p><p>- JSONPATH: `$[?(@.name=="vault_secret_lease_creation")].value.sum()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 0`</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Storage [{#STORAGE}] {#OPERATION} ops, rate |<p>Number of a {#OPERATION} operation against the {#STORAGE} storage backend.</p> |DEPENDENT |vault.metrics.storage.rate[{#STORAGE}, {#OPERATION}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C}`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Rollback attempt [{#MOUNTPOINT}] ops, rate |<p>Number of operations to perform a rollback operation on the given mount point.</p> |DEPENDENT |vault.metrics.rollback.attempt.rate[{#MOUNTPOINT}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C}`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Route rollback [{#MOUNTPOINT}] ops, rate |<p>Number of operations to dispatch a rollback operation to a backend, and for that backend to process it. Rollback operations are automatically scheduled to clean up partial errors.</p> |DEPENDENT |vault.metrics.route.rollback.rate[{#MOUNTPOINT}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `{#PATTERN_C}`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- CHANGE_PER_SECOND</p> |
+|Vault |Vault: Delete WALs, count{#SINGLETON} |<p>Time taken to delete a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.deletewals[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_deletewals_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: GC deleted WAL{#SINGLETON} |<p>Number of Write Ahead Logs (WAL) deleted during each garbage collection run.</p> |DEPENDENT |vault.metrics.wal.gc.deleted[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_gc_deleted`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: WALs on disk, total{#SINGLETON} |<p>Total Number of Write Ahead Logs (WAL) on disk.</p> |DEPENDENT |vault.metrics.wal.gc.total[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_gc_total`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Load WALs, count{#SINGLETON} |<p>Time taken to load a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.loadWAL[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_loadWAL_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Persist WALs, count{#SINGLETON} |<p>Time taken to persist a Write Ahead Log (WAL).</p> |DEPENDENT |vault.metrics.wal.persistwals[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_persistwals_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Flush ready WAL, count{#SINGLETON} |<p>Time taken to flush a ready Write Ahead Log (WAL) to storage.</p> |DEPENDENT |vault.metrics.wal.flushready[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `vault_wal_flushready_count`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Stream WAL missing guard, count{#SINGLETON} |<p>Number of incidences where the starting Merkle Tree index used to begin streaming WAL entries is not matched/found.</p> |DEPENDENT |vault.metrics.logshipper.streamWALs.missing_guard[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `logshipper_streamWALs_missing_guard`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Stream WAL guard found, count{#SINGLETON} |<p>Number of incidences where the starting Merkle Tree index used to begin streaming WAL entries is matched/found.</p> |DEPENDENT |vault.metrics.logshipper.streamWALs.guard_found[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `logshipper_streamWALs_guard_found`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Merkle commit index{#SINGLETON} |<p>The last committed index in the Merkle Tree.</p> |DEPENDENT |vault.metrics.replication.merkle.commit_index[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_merkle_commit_index`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Last WAL{#SINGLETON} |<p>The index of the last WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_wal`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Last DR WAL{#SINGLETON} |<p>The index of the last DR WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_dr_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_dr_wal`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Last performance WAL{#SINGLETON} |<p>The index of the last Performance WAL.</p> |DEPENDENT |vault.metrics.replication.wal.last_performance_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_wal_last_performance_wal`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
+|Vault |Vault: Last remote WAL{#SINGLETON} |<p>The index of the last remote WAL.</p> |DEPENDENT |vault.metrics.replication.fsm.last_remote_wal[{#SINGLETON}]<p>**Preprocessing**:</p><p>- PROMETHEUS_PATTERN: `replication_fsm_last_remote_wal`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Vault |Vault: Token [{#TOKEN_NAME}] error |<p>Token lookup error text.</p> |DEPENDENT |vault.token_via_accessor.error["{#ACCESSOR}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.accessor == "{#ACCESSOR}")].error.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-|Vault |Vault: Token [{#TOKEN_NAME}] has TTL |<p>The Token has TTL.</p> |DEPENDENT |vault.token_via_accessor.has_ttl["{#ACCESSOR}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.accessor == "{#ACCESSOR}")].has_ttl.first()`</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Vault |Vault: Token [{#TOKEN_NAME}] has TTL |<p>The Token has TTL.</p> |DEPENDENT |vault.token_via_accessor.has_ttl["{#ACCESSOR}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.accessor == "{#ACCESSOR}")].has_ttl.first()`</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
|Vault |Vault: Token [{#TOKEN_NAME}] TTL |<p>The TTL period of the token.</p> |DEPENDENT |vault.token_via_accessor.ttl["{#ACCESSOR}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.accessor == "{#ACCESSOR}")].ttl.first()`</p> |
-|Zabbix_raw_items |Vault: Get health |<p>-</p> |HTTP_AGENT |vault.get_health<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED |
-|Zabbix_raw_items |Vault: Get leader |<p>-</p> |HTTP_AGENT |vault.get_leader<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED |
-|Zabbix_raw_items |Vault: Get metrics |<p>-</p> |HTTP_AGENT |vault.get_metrics<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED |
+|Zabbix_raw_items |Vault: Get health |<p>-</p> |HTTP_AGENT |vault.get_health<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> {"healthcheck": 0}`</p> |
+|Zabbix_raw_items |Vault: Get leader |<p>-</p> |HTTP_AGENT |vault.get_leader<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p> |
+|Zabbix_raw_items |Vault: Get metrics |<p>-</p> |HTTP_AGENT |vault.get_metrics<p>**Preprocessing**:</p><p>- CHECK_NOT_SUPPORTED</p> |
|Zabbix_raw_items |Vault: Clear metrics |<p>-</p> |DEPENDENT |vault.clear_metrics<p>**Preprocessing**:</p><p>- CHECK_JSON_ERROR: `$.errors`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|Zabbix_raw_items |Vault: Get tokens |<p>Get information about tokens via their accessors. Accessors are defined in the macro "{$VAULT.TOKEN.ACCESSORS}".</p> |SCRIPT |vault.get_tokens<p>**Expression**:</p>`The text is too long. Please see the template.` |
|Zabbix_raw_items |Vault: Check WAL discovery |<p>-</p> |DEPENDENT |vault.check_wal_discovery<p>**Preprocessing**:</p><p>- PROMETHEUS_TO_JSON: `{__name__=~"^vault_wal_(?:.+)$"}`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- JAVASCRIPT: `return JSON.stringify(value !== "[]" ? [{'{#SINGLETON}': ''}] : []);`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `15m`</p> |
@@ -179,8 +179,8 @@ There are no template links in this template.
|Vault: Failed to get metrics (error: {ITEM.VALUE}) |<p>-</p> |`length(last(/HashiCorp Vault by HTTP/vault.get_metrics.error))>0` |WARNING |<p>**Depends on**:</p><p>- Vault: Vault server is sealed</p> |
|Vault: Current number of open files is too high (over {$VAULT.OPEN.FDS.MAX.WARN}% for 5m) |<p>-</p> |`min(/HashiCorp Vault by HTTP/vault.metrics.process.open.fds,5m)/last(/HashiCorp Vault by HTTP/vault.metrics.process.max.fds)*100>{$VAULT.OPEN.FDS.MAX.WARN}` |WARNING | |
|Vault: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/HashiCorp Vault by HTTP/vault.metrics.process.uptime)<10m` |INFO |<p>Manual close: YES</p> |
-|Vault: High frequency of leadership setup failures (over {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} for 1h) |<p>There have been more than {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} Vault leadership setup failures in the past 1h</p> |`(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h))>{$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN}` |AVERAGE | |
-|Vault: High frequency of leadership losses (over {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} for 1h) |<p>There have been more than {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} Vault leadership losses in the past 1h</p> |`(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h))>{$VAULT.LEADERSHIP.LOSSES.MAX.WARN}` |AVERAGE | |
+|Vault: High frequency of leadership setup failures (over {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} for 1h) |<p>There have been more than {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} Vault leadership setup failures in the past 1h.</p> |`(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h))>{$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN}` |AVERAGE | |
+|Vault: High frequency of leadership losses (over {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} for 1h) |<p>There have been more than {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} Vault leadership losses in the past 1h.</p> |`(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h))>{$VAULT.LEADERSHIP.LOSSES.MAX.WARN}` |AVERAGE | |
|Vault: High frequency of leadership step downs (over {$VAULT.LEADERSHIP.STEPDOWNS.MAX.WARN} for 1h) |<p>There have been more than {$VAULT.LEADERSHIP.STEPDOWNS.MAX.WARN} Vault leadership step downs in the past 1h.</p> |`(max(/HashiCorp Vault by HTTP/vault.metrics.core.step_down,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.step_down,1h))>{$VAULT.LEADERSHIP.STEPDOWNS.MAX.WARN}` |AVERAGE | |
|Vault: Token [{#TOKEN_NAME}] lookup error occurred |<p>-</p> |`length(last(/HashiCorp Vault by HTTP/vault.token_via_accessor.error["{#ACCESSOR}"]))>0` |WARNING |<p>**Depends on**:</p><p>- Vault: Vault server is sealed</p> |
|Vault: Token [{#TOKEN_NAME}] will expire soon (less than {$VAULT.TOKEN.TTL.MIN.CRIT}) |<p>-</p> |`last(/HashiCorp Vault by HTTP/vault.token_via_accessor.has_ttl["{#ACCESSOR}"])=1 and last(/HashiCorp Vault by HTTP/vault.token_via_accessor.ttl["{#ACCESSOR}"])<{$VAULT.TOKEN.TTL.MIN.CRIT}` |AVERAGE | |
diff --git a/templates/app/vault_http/template_app_vault.yaml b/templates/app/vault_http/template_app_vault.yaml
index eab3bd9d63c..bedfae62359 100644
--- a/templates/app/vault_http/template_app_vault.yaml
+++ b/templates/app/vault_http/template_app_vault.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-12-09T12:17:22Z'
+ date: '2021-12-19T15:19:55Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -1065,7 +1065,7 @@ zabbix_export:
expression: '(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership.setup_failed,1h))>{$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN}'
name: 'Vault: High frequency of leadership setup failures (over {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} for 1h)'
priority: AVERAGE
- description: 'There have been more than {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} Vault leadership setup failures in the past 1h'
+ description: 'There have been more than {$VAULT.LEADERSHIP.SETUP.FAILED.MAX.WARN} Vault leadership setup failures in the past 1h.'
-
uuid: f0ca68774b5842d7803d2c2619958184
name: 'Vault: Leadership setup lost, counter'
@@ -1097,7 +1097,7 @@ zabbix_export:
expression: '(max(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h)-min(/HashiCorp Vault by HTTP/vault.metrics.core.leadership_lost,1h))>{$VAULT.LEADERSHIP.LOSSES.MAX.WARN}'
name: 'Vault: High frequency of leadership losses (over {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} for 1h)'
priority: AVERAGE
- description: 'There have been more than {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} Vault leadership losses in the past 1h'
+ description: 'There have been more than {$VAULT.LEADERSHIP.LOSSES.MAX.WARN} Vault leadership losses in the past 1h.'
-
uuid: 2bdb273b513d4a06914cd6ea11ac2368
name: 'Vault: Post-unseal ops, counter'
diff --git a/templates/app/wildfly_domain_jmx/README.md b/templates/app/wildfly_domain_jmx/README.md
index c7af70a5224..4366d2656f9 100644
--- a/templates/app/wildfly_domain_jmx/README.md
+++ b/templates/app/wildfly_domain_jmx/README.md
@@ -15,7 +15,7 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/jmx) for basic instructions.
-Metrics are collected by JMX.
+Metrics are collected by JMX.
This template works with Domain Controller.
1. Enable and configure JMX access to WildFly. See documentation for [instructions](https://docs.wildfly.org/23/Admin_Guide.html#JMX).
@@ -50,8 +50,8 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Deployments discovery |<p>Discovery deployments metrics.</p> |JMX |jmx.get[beans,"jboss.as.expr:deployment=*,server-group=*"]<p>**Filter**:</p>AND <p>- A: {#DEPLOYMENT} MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.MATCHES}`</p><p>- B: {#DEPLOYMENT} NOT_MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.NOT_MATCHES}`</p> |
-|Servers discovery |<p>Discovery instances in domain.</p> |JMX |jmx.get[beans,"jboss.as:host=master,server-config=*"]<p>**Filter**:</p>AND <p>- A: {#SERVER} MATCHES_REGEX `{$WILDFLY.SERVER.MATCHES}`</p><p>- B: {#SERVER} NOT_MATCHES_REGEX `{$WILDFLY.SERVER.NOT_MATCHES}`</p> |
+|Deployments discovery |<p>Discovery deployments metrics.</p> |JMX |jmx.get[beans,"jboss.as.expr:deployment=*,server-group=*"]<p>**Filter**:</p>AND <p>- {#DEPLOYMENT} MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.MATCHES}`</p><p>- {#DEPLOYMENT} NOT_MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.NOT_MATCHES}`</p> |
+|Servers discovery |<p>Discovery instances in domain.</p> |JMX |jmx.get[beans,"jboss.as:host=master,server-config=*"]<p>**Filter**:</p>AND <p>- {#SERVER} MATCHES_REGEX `{$WILDFLY.SERVER.MATCHES}`</p><p>- {#SERVER} NOT_MATCHES_REGEX `{$WILDFLY.SERVER.NOT_MATCHES}`</p> |
## Items collected
@@ -62,9 +62,9 @@ There are no template links in this template.
|WildFly |WildFly: Process type |<p>The type of process represented by this root resource.</p> |JMX |jmx["jboss.as:management-root=server","processType"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly: Version |<p>The version of the WildFly Core based product release</p> |JMX |jmx["jboss.as:management-root=server","productVersion"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly: Uptime |<p>WildFly server uptime.</p> |JMX |jmx["java.lang:type=Runtime","Uptime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p> |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Enabled |<p>Boolean indicating whether the deployment content is currently deployed in the runtime (or should be deployed in the runtime the next time the server starts.)</p> |JMX |jmx["{#JMXOBJ}",enabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Managed |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",managed]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly domain: Server {#SERVER}: Autostart |<p>Whether or not this server should be started when the Host Controller starts.</p> |JMX |jmx["{#JMXOBJ}",autoStart]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Enabled |<p>Boolean indicating whether the deployment content is currently deployed in the runtime (or should be deployed in the runtime the next time the server starts.)</p> |JMX |jmx["{#JMXOBJ}",enabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Managed |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",managed]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly domain: Server {#SERVER}: Autostart |<p>Whether or not this server should be started when the Host Controller starts.</p> |JMX |jmx["{#JMXOBJ}",autoStart]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly domain: Server {#SERVER}: Status |<p>The current status of the server.</p> |JMX |jmx["{#JMXOBJ}",status]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly domain: Server {#SERVER}: Server group |<p>The name of a server group from the domain model.</p> |JMX |jmx["{#JMXOBJ}",group]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
@@ -72,10 +72,10 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|WildFly: Version has changed (new version: {ITEM.VALUE}) |<p>WildFly version has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["jboss.as:management-root=server","productVersion"].diff()}=1 and {TEMPLATE_NAME:jmx["jboss.as:management-root=server","productVersion"].strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|WildFly: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:jmx["java.lang:type=Runtime","Uptime"].last()}<10m` |INFO |<p>Manual close: YES</p> |
-|WildFly domain: Server {#SERVER}: Server status has changed (new status: {ITEM.VALUE}) |<p>Server status has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",status].diff()}=1 and {TEMPLATE_NAME:jmx["{#JMXOBJ}",status].strlen()}>0` |WARNING |<p>Manual close: YES</p> |
-|WildFly domain: Server {#SERVER}: Server group has changed (new group: {ITEM.VALUE}) |<p>Server group has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",group].diff()}=1 and {TEMPLATE_NAME:jmx["{#JMXOBJ}",group].strlen()}>0` |INFO |<p>Manual close: YES</p> |
+|WildFly: Version has changed (new version: {ITEM.VALUE}) |<p>WildFly version has changed. Ack to close.</p> |`last(/WildFly Domain by JMX/jmx["jboss.as:management-root=server","productVersion"],#1)<>last(/WildFly Domain by JMX/jmx["jboss.as:management-root=server","productVersion"],#2) and length(last(/WildFly Domain by JMX/jmx["jboss.as:management-root=server","productVersion"]))>0` |INFO |<p>Manual close: YES</p> |
+|WildFly: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/WildFly Domain by JMX/jmx["java.lang:type=Runtime","Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|WildFly domain: Server {#SERVER}: Server status has changed (new status: {ITEM.VALUE}) |<p>Server status has changed. Ack to close.</p> |`last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",status],#1)<>last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",status],#2) and length(last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",status]))>0` |WARNING |<p>Manual close: YES</p> |
+|WildFly domain: Server {#SERVER}: Server group has changed (new group: {ITEM.VALUE}) |<p>Server group has changed. Ack to close.</p> |`last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",group],#1)<>last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",group],#2) and length(last(/WildFly Domain by JMX/jmx["{#JMXOBJ}",group]))>0` |INFO |<p>Manual close: YES</p> |
## Feedback
diff --git a/templates/app/wildfly_domain_jmx/template_app_wildfly_domain_jmx.yaml b/templates/app/wildfly_domain_jmx/template_app_wildfly_domain_jmx.yaml
index 98494ec4eb0..9835abe1ebe 100644
--- a/templates/app/wildfly_domain_jmx/template_app_wildfly_domain_jmx.yaml
+++ b/templates/app/wildfly_domain_jmx/template_app_wildfly_domain_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:25Z'
+ date: '2021-12-19T15:19:58Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -18,7 +18,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
diff --git a/templates/app/wildfly_server_jmx/README.md b/templates/app/wildfly_server_jmx/README.md
index b64ee2850b5..61b9faf5498 100644
--- a/templates/app/wildfly_server_jmx/README.md
+++ b/templates/app/wildfly_server_jmx/README.md
@@ -15,7 +15,7 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/jmx) for basic instructions.
-Metrics are collected by JMX.
+Metrics are collected by JMX.
This template works with standalone and domain instances.
1. Enable and configure JMX access to WildFly. See documentation for [instructions](https://docs.wildfly.org/23/Admin_Guide.html#JMX).
@@ -49,7 +49,7 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Deployments discovery |<p>Discovery deployments metrics.</p> |JMX |jmx.get[beans,"jboss.as.expr:deployment=*"]<p>**Filter**:</p>AND <p>- A: {#DEPLOYMENT} MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.MATCHES}`</p><p>- B: {#DEPLOYMENT} NOT_MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.NOT_MATCHES}`</p> |
+|Deployments discovery |<p>Discovery deployments metrics.</p> |JMX |jmx.get[beans,"jboss.as.expr:deployment=*"]<p>**Filter**:</p>AND <p>- {#DEPLOYMENT} MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.MATCHES}`</p><p>- {#DEPLOYMENT} NOT_MATCHES_REGEX `{$WILDFLY.DEPLOYMENT.NOT_MATCHES}`</p> |
|JDBC metrics discovery |<p>-</p> |JMX |jmx.get[beans,"jboss.as:subsystem=datasources,data-source=*,statistics=jdbc"] |
|Pools metrics discovery |<p>-</p> |JMX |jmx.get[beans,"jboss.as:subsystem=datasources,data-source=*,statistics=pool"] |
|Undertow metrics discovery |<p>-</p> |JMX |jmx.get[beans,"jboss.as:subsystem=undertow,server=*,http-listener=*"] |
@@ -65,28 +65,28 @@ There are no template links in this template.
|WildFly |WildFly: Server controller state |<p>The current state of the server controller; either STARTING, RUNNING, RESTART_REQUIRED, RELOAD_REQUIRED or STOPPING.</p> |JMX |jmx["jboss.as:management-root=server","serverState"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly: Version |<p>The version of the WildFly Core based product release</p> |JMX |jmx["jboss.as:management-root=server","productVersion"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly: Uptime |<p>WildFly server uptime.</p> |JMX |jmx["java.lang:type=Runtime","Uptime"]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p> |
-|WildFly |WildFly: Transactions: Total, rate |<p>The total number of transactions (top-level and nested) created per second.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: Aborted, rate |<p>The number of aborted (i.e. rolledback) transactions per second.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfAbortedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: Application rollbacks, rate |<p>The number of transactions that have been rolled back by application request. This includes those that timeout, since the timeout behavior is considered an attribute of the application configuration.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfApplicationRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: Committed, rate |<p>The number of committed transactions</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfCommittedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: Heuristics, rate |<p>The number of transactions which have terminated with heuristic outcomes.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfHeuristics"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly: Transactions: Total, rate |<p>The total number of transactions (top-level and nested) created per second.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: Aborted, rate |<p>The number of aborted (i.e. rolled back) transactions per second.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfAbortedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: Application rollbacks, rate |<p>The number of transactions that have been rolled back by application request. This includes those that timeout, since the timeout behavior is considered an attribute of the application configuration.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfApplicationRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: Committed, rate |<p>The number of committed transactions</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfCommittedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: Heuristics, rate |<p>The number of transactions which have terminated with heuristic outcomes.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfHeuristics"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly: Transactions: Current |<p>The number of transactions that have begun but not yet terminated.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfInflightTransactions"] |
-|WildFly |WildFly: Transactions: Nested, rate |<p>The total number of nested (sub) transactions created.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfNestedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: ResourceRollbacks, rate |<p>The number of transactions that rolled back due to resource (participant) failure.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfResourceRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: System rollbacks, rate |<p>The number of transactions that have been rolled back due to internal system errors.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfSystemRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly: Transactions: Timed out, rate |<p>The number of transactions that have rolled back due to timeout.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfTimedOutTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Status |<p>The current runtime status of a deployment. </p><p>Possible status modes are OK, FAILED, and STOPPED. </p><p>FAILED indicates a dependency is missing or a service could not start. </p><p>STOPPED indicates that the deployment was not enabled or was manually stopped.</p> |JMX |jmx["{#JMXOBJ}",status]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Enabled |<p>Boolean indicating whether the deployment content is currently deployed in the runtime (or should be deployed in the runtime the next time the server starts.)</p> |JMX |jmx["{#JMXOBJ}",enabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Managed |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",managed]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly deployment [{#DEPLOYMENT}]: Persistent |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",persistent]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly: Transactions: Nested, rate |<p>The total number of nested (sub) transactions created.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfNestedTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: ResourceRollbacks, rate |<p>The number of transactions that rolled back due to resource (participant) failure.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfResourceRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: System rollbacks, rate |<p>The number of transactions that have been rolled back due to internal system errors.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfSystemRollbacks"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly: Transactions: Timed out, rate |<p>The number of transactions that have rolled back due to timeout.</p> |JMX |jmx["jboss.as:subsystem=transactions","numberOfTimedOutTransactions"]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Status |<p>The current runtime status of a deployment.</p><p>Possible status modes are OK, FAILED, and STOPPED.</p><p>FAILED indicates a dependency is missing or a service could not start.</p><p>STOPPED indicates that the deployment was not enabled or was manually stopped.</p> |JMX |jmx["{#JMXOBJ}",status]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Enabled |<p>Boolean indicating whether the deployment content is currently deployed in the runtime (or should be deployed in the runtime the next time the server starts.)</p> |JMX |jmx["{#JMXOBJ}",enabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Managed |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",managed]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly deployment [{#DEPLOYMENT}]: Persistent |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",persistent]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly deployment [{#DEPLOYMENT}]: Enabled time |<p>Indicates if the deployment is managed (aka uses the ContentRepository).</p> |JMX |jmx["{#JMXOBJ}",enabledTime]<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.001`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache access, rate |<p>The number of times that the statement cache was accessed per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheAccessCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache add, rate |<p>The number of statements added to the statement cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheAddCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache access, rate |<p>The number of times that the statement cache was accessed per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheAccessCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache add, rate |<p>The number of statements added to the statement cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheAddCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache current size |<p>The number of prepared and callable statements currently cached in the statement cache.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheCurrentSize] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache delete, rate |<p>The number of statements discarded from the cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheDeleteCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache hit, rate |<p>The number of times that statements from the cache were used per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheHitCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache miss, rate |<p>The number of times that a statement request could not be satisfied with a statement from the cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheMissCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Statistics enabled |<p>Define whether runtime statistics are enabled or not.</p> |JMX |jmx["{#JMXOBJ}",statisticsEnabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache delete, rate |<p>The number of statements discarded from the cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheDeleteCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache hit, rate |<p>The number of times that statements from the cache were used per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheHitCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Cache miss, rate |<p>The number of times that a statement request could not be satisfied with a statement from the cache per second.</p> |JMX |jmx["{#JMXOBJ}",PreparedStatementCacheMissCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Statistics enabled |<p>Define whether runtime statistics are enabled or not.</p> |JMX |jmx["{#JMXOBJ}",statisticsEnabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Active |<p>The number of open connections.</p> |JMX |jmx["{#JMXOBJ}",ActiveCount] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Available |<p>The available count.</p> |JMX |jmx["{#JMXOBJ}",AvailableCount] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Blocking time, avg |<p>Average Blocking Time for pool.</p> |JMX |jmx["{#JMXOBJ}",AverageBlockingTime] |
@@ -94,51 +94,51 @@ There are no template links in this template.
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Get time, avg |<p>The average time spent obtaining a physical connection.</p> |JMX |jmx["{#JMXOBJ}",AverageGetTime] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Pool time, avg |<p>The average time for a physical connection spent in the pool.</p> |JMX |jmx["{#JMXOBJ}",AveragePoolTime] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Usage time, avg |<p>The average time spent using a physical connection</p> |JMX |jmx["{#JMXOBJ}",AverageUsageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Blocking failure, rate |<p>The number of failures trying to obtain a physical connection per second.</p> |JMX |jmx["{#JMXOBJ}",BlockingFailureCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Created, rate |<p>The created per second</p> |JMX |jmx["{#JMXOBJ}",CreatedCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Destroyed, rate |<p>The destroyed count.</p> |JMX |jmx["{#JMXOBJ}",DestroyedCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Blocking failure, rate |<p>The number of failures trying to obtain a physical connection per second.</p> |JMX |jmx["{#JMXOBJ}",BlockingFailureCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Created, rate |<p>The created per second</p> |JMX |jmx["{#JMXOBJ}",CreatedCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Destroyed, rate |<p>The destroyed count.</p> |JMX |jmx["{#JMXOBJ}",DestroyedCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Idle |<p>The number of physical connections currently idle.</p> |JMX |jmx["{#JMXOBJ}",IdleCount] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: In use |<p>The number of physical connections currently in use.</p> |JMX |jmx["{#JMXOBJ}",InUseCount] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Used, max |<p>The maximum number of connections used.</p> |JMX |jmx["{#JMXOBJ}",MaxUsedCount] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Statistics enabled |<p>Define whether runtime statistics are enabled or not.</p> |JMX |jmx["{#JMXOBJ}",statisticsEnabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL<p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Timed out, rate |<p>The timed out connections per second.</p> |JMX |jmx["{#JMXOBJ}",TimedOut]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Statistics enabled |<p>Define whether runtime statistics are enabled or not.</p> |JMX |jmx["{#JMXOBJ}",statisticsEnabled]<p>**Preprocessing**:</p><p>- BOOL_TO_DECIMAL</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Timed out, rate |<p>The timed out connections per second.</p> |JMX |jmx["{#JMXOBJ}",TimedOut]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: Connections: Wait |<p>The number of requests that had to wait to obtain a physical connection.</p> |JMX |jmx["{#JMXOBJ}",WaitCount] |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Commit time, avg |<p>The average time for a XAResource commit invocation.</p> |JMX |jmx["{#JMXOBJ}",XACommitAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Commit, rate |<p>The number of XAResource commit invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XACommitCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Commit, rate |<p>The number of XAResource commit invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XACommitCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: End time, avg |<p>The average time for a XAResource end invocation.</p> |JMX |jmx["{#JMXOBJ}",XAEndAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: End, rate |<p>The number of XAResource end invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAEndCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: End, rate |<p>The number of XAResource end invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAEndCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Forget time, avg |<p>The average time for a XAResource forget invocation.</p> |JMX |jmx["{#JMXOBJ}",XAForgetAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Forget, rate |<p>The number of XAResource forget invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAForgetCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Forget, rate |<p>The number of XAResource forget invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAForgetCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Prepare time, avg |<p>The average time for a XAResource prepare invocation.</p> |JMX |jmx["{#JMXOBJ}",XAPrepareAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Prepare, rate |<p>The number of XAResource prepare invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAPrepareCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Prepare, rate |<p>The number of XAResource prepare invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAPrepareCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Recover time, avg |<p>The average time for a XAResource recover invocation.</p> |JMX |jmx["{#JMXOBJ}",XARecoverAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Recover, rate |<p>The number of XAResource recover invocationsper second.</p> |JMX |jmx["{#JMXOBJ}",XARecoverCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Recover, rate |<p>The number of XAResource recover invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XARecoverCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Rollback time, avg |<p>The average time for a XAResource rollback invocation.</p> |JMX |jmx["{#JMXOBJ}",XARollbackAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Rollback, rate |<p>The number of XAResource rollback invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XARollbackCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Rollback, rate |<p>The number of XAResource rollback invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XARollbackCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Start time, avg |<p>The average time for a XAResource start invocation.</p> |JMX |jmx["{#JMXOBJ}",XAStartAverageTime] |
-|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Start rate |<p>The number of XAResource start invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAStartCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly listener {#HTTP_LISTENER}: Errors, rate |<p>The number of 500 responses that have been sent by this listener per second.</p> |JMX |jmx["{#JMXOBJ}",errorCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly listener {#HTTP_LISTENER}: Requests, rate |<p>The number of requests this listener has served per second.</p> |JMX |jmx["{#JMXOBJ}",requestCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly listener {#HTTP_LISTENER}: Bytes sent, rate |<p>The number of bytes that have been sent out on this listener per second.</p> |JMX |jmx["{#JMXOBJ}",bytesSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|WildFly |WildFly listener {#HTTP_LISTENER}: Bytes received, rate |<p>The number of bytes that have been received by this listener per second.</p> |JMX |jmx["{#JMXOBJ}",bytesReceived]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|WildFly |WildFly {#JMX_DATA_SOURCE}: XA: Start rate |<p>The number of XAResource start invocations per second.</p> |JMX |jmx["{#JMXOBJ}",XAStartCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly listener {#HTTP_LISTENER}: Errors, rate |<p>The number of 500 responses that have been sent by this listener per second.</p> |JMX |jmx["{#JMXOBJ}",errorCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly listener {#HTTP_LISTENER}: Requests, rate |<p>The number of requests this listener has served per second.</p> |JMX |jmx["{#JMXOBJ}",requestCount]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly listener {#HTTP_LISTENER}: Bytes sent, rate |<p>The number of bytes that have been sent out on this listener per second.</p> |JMX |jmx["{#JMXOBJ}",bytesSent]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|WildFly |WildFly listener {#HTTP_LISTENER}: Bytes received, rate |<p>The number of bytes that have been received by this listener per second.</p> |JMX |jmx["{#JMXOBJ}",bytesReceived]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|WildFly: Server needs to restart for configuration change. |<p>-</p> |`{TEMPLATE_NAME:jmx["jboss.as:management-root=server","runtimeConfigurationState"].str(ok)}=0` |WARNING | |
-|WildFly: Server controller is not in RUNNING state |<p>-</p> |`{TEMPLATE_NAME:jmx["jboss.as:management-root=server","serverState"].str(running)}=0` |WARNING |<p>**Depends on**:</p><p>- WildFly: Server needs to restart for configuration change.</p> |
-|WildFly: Version has changed (new version: {ITEM.VALUE}) |<p>WildFly version has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["jboss.as:management-root=server","productVersion"].diff()}=1 and {TEMPLATE_NAME:jmx["jboss.as:management-root=server","productVersion"].strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|WildFly: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`{TEMPLATE_NAME:jmx["java.lang:type=Runtime","Uptime"].last()}<10m` |INFO |<p>Manual close: YES</p> |
-|WildFly: Failed to fetch info data (or no data for 15m) |<p>Zabbix has not received data for items for the last 15 minutes</p> |`{TEMPLATE_NAME:jmx["java.lang:type=Runtime","Uptime"].nodata(15m)}=1` |WARNING | |
-|WildFly deployment [{#DEPLOYMENT}]: Deployment status has changed (new status: {ITEM.VALUE}) |<p>Deployment status has changed. Ack to close.</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",status].diff()}=1 and {TEMPLATE_NAME:jmx["{#JMXOBJ}",status].strlen()}>0` |WARNING |<p>Manual close: YES</p> |
-|WildFly {#JMX_DATA_SOURCE}: JDBC monitoring statistic is not enabled |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",statisticsEnabled].last()}=0` |INFO | |
-|WildFly {#JMX_DATA_SOURCE}: There are no active connections for 5m |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",ActiveCount].max(5m)}=0` |WARNING | |
-|WildFly {#JMX_DATA_SOURCE}: Connection usage is too high (over {$WILDFLY.CONN.USAGE.WARN.MAX} in 5m) |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",InUseCount].min(5m)}/{WildFly Server by JMX:jmx["{#JMXOBJ}",AvailableCount].last()}*100>{$WILDFLY.CONN.USAGE.WARN.MAX}` |HIGH | |
-|WildFly {#JMX_DATA_SOURCE}: Pools monitoring statistic is not enabled |<p>Zabbix has not received data for items for the last 15 minutes</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",statisticsEnabled].last()}=0` |INFO | |
-|WildFly {#JMX_DATA_SOURCE}: There are timeout connections |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",TimedOut].last()}>0` |WARNING | |
-|WildFly {#JMX_DATA_SOURCE}: Too many waiting connections (over {$WILDFLY.CONN.WAIT.MAX.WARN} for 5m) |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",WaitCount].min(5m)}>{$WILDFLY.CONN.WAIT.MAX.WARN}` |WARNING | |
-|WildFly listener {#HTTP_LISTENER}: There are 500 responses by this listener. |<p>-</p> |`{TEMPLATE_NAME:jmx["{#JMXOBJ}",errorCount].last()}>0` |WARNING | |
+|WildFly: Server needs to restart for configuration change. |<p>-</p> |`find(/WildFly Server by JMX/jmx["jboss.as:management-root=server","runtimeConfigurationState"],,"like","ok")=0` |WARNING | |
+|WildFly: Server controller is not in RUNNING state |<p>-</p> |`find(/WildFly Server by JMX/jmx["jboss.as:management-root=server","serverState"],,"like","running")=0` |WARNING |<p>**Depends on**:</p><p>- WildFly: Server needs to restart for configuration change.</p> |
+|WildFly: Version has changed (new version: {ITEM.VALUE}) |<p>WildFly version has changed. Ack to close.</p> |`last(/WildFly Server by JMX/jmx["jboss.as:management-root=server","productVersion"],#1)<>last(/WildFly Server by JMX/jmx["jboss.as:management-root=server","productVersion"],#2) and length(last(/WildFly Server by JMX/jmx["jboss.as:management-root=server","productVersion"]))>0` |INFO |<p>Manual close: YES</p> |
+|WildFly: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/WildFly Server by JMX/jmx["java.lang:type=Runtime","Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|WildFly: Failed to fetch info data (or no data for 15m) |<p>Zabbix has not received data for items for the last 15 minutes</p> |`nodata(/WildFly Server by JMX/jmx["java.lang:type=Runtime","Uptime"],15m)=1` |WARNING | |
+|WildFly deployment [{#DEPLOYMENT}]: Deployment status has changed (new status: {ITEM.VALUE}) |<p>Deployment status has changed. Ack to close.</p> |`last(/WildFly Server by JMX/jmx["{#JMXOBJ}",status],#1)<>last(/WildFly Server by JMX/jmx["{#JMXOBJ}",status],#2) and length(last(/WildFly Server by JMX/jmx["{#JMXOBJ}",status]))>0` |WARNING |<p>Manual close: YES</p> |
+|WildFly {#JMX_DATA_SOURCE}: JDBC monitoring statistic is not enabled |<p>-</p> |`last(/WildFly Server by JMX/jmx["{#JMXOBJ}",statisticsEnabled])=0` |INFO | |
+|WildFly {#JMX_DATA_SOURCE}: There are no active connections for 5m |<p>-</p> |`max(/WildFly Server by JMX/jmx["{#JMXOBJ}",ActiveCount],5m)=0` |WARNING | |
+|WildFly {#JMX_DATA_SOURCE}: Connection usage is too high (over {$WILDFLY.CONN.USAGE.WARN.MAX} in 5m) |<p>-</p> |`min(/WildFly Server by JMX/jmx["{#JMXOBJ}",InUseCount],5m)/last(/WildFly Server by JMX/jmx["{#JMXOBJ}",AvailableCount])*100>{$WILDFLY.CONN.USAGE.WARN.MAX}` |HIGH | |
+|WildFly {#JMX_DATA_SOURCE}: Pools monitoring statistic is not enabled |<p>Zabbix has not received data for items for the last 15 minutes</p> |`last(/WildFly Server by JMX/jmx["{#JMXOBJ}",statisticsEnabled])=0` |INFO | |
+|WildFly {#JMX_DATA_SOURCE}: There are timeout connections |<p>-</p> |`last(/WildFly Server by JMX/jmx["{#JMXOBJ}",TimedOut])>0` |WARNING | |
+|WildFly {#JMX_DATA_SOURCE}: Too many waiting connections (over {$WILDFLY.CONN.WAIT.MAX.WARN} for 5m) |<p>-</p> |`min(/WildFly Server by JMX/jmx["{#JMXOBJ}",WaitCount],5m)>{$WILDFLY.CONN.WAIT.MAX.WARN}` |WARNING | |
+|WildFly listener {#HTTP_LISTENER}: There are 500 responses by this listener. |<p>-</p> |`last(/WildFly Server by JMX/jmx["{#JMXOBJ}",errorCount])>0` |WARNING | |
## Feedback
diff --git a/templates/app/wildfly_server_jmx/template_app_wildfly_server_jmx.yaml b/templates/app/wildfly_server_jmx/template_app_wildfly_server_jmx.yaml
index f9327790f8b..85fad178f5b 100644
--- a/templates/app/wildfly_server_jmx/template_app_wildfly_server_jmx.yaml
+++ b/templates/app/wildfly_server_jmx/template_app_wildfly_server_jmx.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:26Z'
+ date: '2021-12-19T15:19:58Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -18,7 +18,7 @@ zabbix_export:
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -537,9 +537,9 @@ zabbix_export:
username: '{$WILDFLY.USER}'
password: '{$WILDFLY.PASSWORD}'
description: |
- The current runtime status of a deployment.
- Possible status modes are OK, FAILED, and STOPPED.
- FAILED indicates a dependency is missing or a service could not start.
+ The current runtime status of a deployment.
+ Possible status modes are OK, FAILED, and STOPPED.
+ FAILED indicates a dependency is missing or a service could not start.
STOPPED indicates that the deployment was not enabled or was manually stopped.
preprocessing:
-
diff --git a/templates/app/zabbix_server/README.md b/templates/app/zabbix_server/README.md
index 23d3913b8ec..c60572bdb46 100644
--- a/templates/app/zabbix_server/README.md
+++ b/templates/app/zabbix_server/README.md
@@ -20,108 +20,121 @@ There are no template links in this template.
## Discovery rules
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|High availability cluster node discovery |<p>LLD rule with item and trigger prototypes for node discovery.</p> |DEPENDENT |zabbix.nodes.discovery |
## Items collected
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|Zabbix server |Zabbix server: Queue over 10 minutes |<p>Number of monitored items in the queue which are delayed at least by 10 minutes</p> |INTERNAL |zabbix[queue,10m] |
-|Zabbix server |Zabbix server: Queue |<p>Number of monitored items in the queue which are delayed at least by 6 seconds</p> |INTERNAL |zabbix[queue] |
-|Zabbix server |Zabbix server: Utilization of alert manager internal processes, in % |<p>Average percentage of time alert manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alert manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of alert syncer internal processes, in % |<p>Average percentage of time alert syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alert syncer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of alerter internal processes, in % |<p>Average percentage of time alerter processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alerter,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of availability manager internal processes, in % |<p>Average percentage of time availability manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,availability manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of configuration syncer internal processes, in % |<p>Average percentage of time configuration syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,configuration syncer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of discoverer data collector processes, in % |<p>Average percentage of time discoverer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,discoverer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of escalator internal processes, in % |<p>Average percentage of time escalator processes have been busy in the last minute</p> |INTERNAL |zabbix[process,escalator,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of history poller data collector processes, in % |<p>Average percentage of time history poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,history poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of history syncer internal processes, in % |<p>Average percentage of time history syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,history syncer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of housekeeper internal processes, in % |<p>Average percentage of time housekeeper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,housekeeper,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of http poller data collector processes, in % |<p>Average percentage of time http poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,http poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of icmp pinger data collector processes, in % |<p>Average percentage of time icmp pinger processes have been busy in the last minute</p> |INTERNAL |zabbix[process,icmp pinger,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of ipmi manager internal processes, in % |<p>Average percentage of time ipmi manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,ipmi manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of ipmi poller data collector processes, in % |<p>Average percentage of time ipmi poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,ipmi poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of java poller data collector processes, in % |<p>Average percentage of time java poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,java poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of LLD manager internal processes, in % |<p>Average percentage of time lld manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,lld manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of LLD worker internal processes, in % |<p>Average percentage of time lld worker processes have been busy in the last minute</p> |INTERNAL |zabbix[process,lld worker,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of poller data collector processes, in % |<p>Average percentage of time poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of preprocessing worker internal processes, in % |<p>Average percentage of time preprocessing worker processes have been busy in the last minute</p> |INTERNAL |zabbix[process,preprocessing worker,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of preprocessing manager internal processes, in % |<p>Average percentage of time preprocessing manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,preprocessing manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of proxy poller data collector processes, in % |<p>Average percentage of time proxy poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,proxy poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of report manager internal processes, in % |<p>Average percentage of time report manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,report manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of report writer internal processes, in % |<p>Average percentage of time report writer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,report writer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of self-monitoring internal processes, in % |<p>Average percentage of time self-monitoring processes have been busy in the last minute</p> |INTERNAL |zabbix[process,self-monitoring,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of snmp trapper data collector processes, in % |<p>Average percentage of time snmp trapper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,snmp trapper,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of task manager internal processes, in % |<p>Average percentage of time task manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,task manager,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of timer internal processes, in % |<p>Average percentage of time timer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,timer,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of trapper data collector processes, in % |<p>Average percentage of time trapper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,trapper,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of unreachable poller data collector processes, in % |<p>Average percentage of time unreachable poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,unreachable poller,avg,busy] |
-|Zabbix server |Zabbix server: Utilization of vmware data collector processes, in % |<p>Average percentage of time vmware collector processes have been busy in the last minute</p> |INTERNAL |zabbix[process,vmware collector,avg,busy] |
-|Zabbix server |Zabbix server: Configuration cache, % used |<p>Availability statistics of Zabbix configuration cache. Percentage of used buffer</p> |INTERNAL |zabbix[rcache,buffer,pused] |
-|Zabbix server |Zabbix server: Trend function cache, % unique requests |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cached items from cached items + requests. Low percentage most likely means that the cache size can be reduced.</p> |INTERNAL |zabbix[tcache,cache,pitems] |
-|Zabbix server |Zabbix server: Trend function cache, % misses |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses</p> |INTERNAL |zabbix[tcache,cache,pmisses] |
-|Zabbix server |Zabbix server: Value cache, % used |<p>Availability statistics of Zabbix value cache. Percentage of used buffer</p> |INTERNAL |zabbix[vcache,buffer,pused] |
-|Zabbix server |Zabbix server: Value cache hits |<p>Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache)</p> |INTERNAL |zabbix[vcache,cache,hits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Value cache misses |<p>Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database)</p> |INTERNAL |zabbix[vcache,cache,misses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Value cache operating mode |<p>Value cache operating mode</p> |INTERNAL |zabbix[vcache,cache,mode] |
-|Zabbix server |Zabbix server: Version |<p>Version of Zabbix server.</p> |INTERNAL |zabbix[version]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Zabbix server |Zabbix server: VMware cache, % used |<p>Availability statistics of Zabbix vmware cache. Percentage of used buffer</p> |INTERNAL |zabbix[vmware,buffer,pused] |
-|Zabbix server |Zabbix server: History write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history buffer.</p><p>History cache is used to store item values. A high number indicates performance problems on the database side.</p> |INTERNAL |zabbix[wcache,history,pused] |
-|Zabbix server |Zabbix server: History index cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history index buffer.</p><p>History index cache is used to index values stored in history cache.</p> |INTERNAL |zabbix[wcache,index,pused] |
-|Zabbix server |Zabbix server: Trend write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used trend buffer.</p><p>Trend cache stores aggregate for the current hour for all items that receive data.</p> |INTERNAL |zabbix[wcache,trend,pused] |
-|Zabbix server |Zabbix server: Number of processed values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Total number of values processed by Zabbix server or Zabbix proxy, except unsupported items.</p> |INTERNAL |zabbix[wcache,values]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Number of processed numeric (float) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed float values.</p> |INTERNAL |zabbix[wcache,values,float]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Number of processed log values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed log values.</p> |INTERNAL |zabbix[wcache,values,log]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Number of processed not supported values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of times item processing resulted in item becoming unsupported or keeping that state.</p> |INTERNAL |zabbix[wcache,values,not supported]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Number of processed character values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed character/string values.</p> |INTERNAL |zabbix[wcache,values,str]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: Number of processed text values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed text values.</p> |INTERNAL |zabbix[wcache,values,text]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Zabbix server: LLD queue |<p>Count of values enqueued in the low-level discovery processing queue.</p> |INTERNAL |zabbix[lld_queue] |
-|Zabbix server |Zabbix server: Preprocessing queue |<p>Count of values enqueued in the preprocessing queue.</p> |INTERNAL |zabbix[preprocessing_queue] |
-|Zabbix server |Zabbix server: Number of processed numeric (unsigned) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed numeric (unsigned) values.</p> |INTERNAL |zabbix[wcache,values,uint]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND |
+|Cluster |Cluster node [{#NODE.NAME}]: Address |<p>Node IPv4 address.</p> |DEPENDENT |zabbix.nodes.address[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.id=="{#NODE.ID}")].address.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Last access time |<p>Last access time.</p> |DEPENDENT |zabbix.nodes.lastaccess.time[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.id=="{#NODE.ID}")].lastaccess.first()`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Last access age |<p>Time between database unix_timestamp() and last access time.</p> |DEPENDENT |zabbix.nodes.lastaccess.age[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.id=="{#NODE.ID}")].lastaccess_age.first()`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Status |<p>Cluster node status.</p> |DEPENDENT |zabbix.nodes.status[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.[?(@.id=="{#NODE.ID}")].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p> |
+|Zabbix_raw_items |Zabbix stats cluster |<p>Zabbix cluster statistics master item.</p> |INTERNAL |zabbix[cluster,discovery,nodes] |
+|Zabbix_server |Zabbix server: Queue over 10 minutes |<p>Number of monitored items in the queue which are delayed at least by 10 minutes.</p> |INTERNAL |zabbix[queue,10m] |
+|Zabbix_server |Zabbix server: Queue |<p>Number of monitored items in the queue which are delayed at least by 6 seconds.</p> |INTERNAL |zabbix[queue] |
+|Zabbix_server |Zabbix server: Utilization of alert manager internal processes, in % |<p>Average percentage of time alert manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alert manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of alert syncer internal processes, in % |<p>Average percentage of time alert syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alert syncer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of alerter internal processes, in % |<p>Average percentage of time alerter processes have been busy in the last minute</p> |INTERNAL |zabbix[process,alerter,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of availability manager internal processes, in % |<p>Average percentage of time availability manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,availability manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of configuration syncer internal processes, in % |<p>Average percentage of time configuration syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,configuration syncer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of discoverer data collector processes, in % |<p>Average percentage of time discoverer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,discoverer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of escalator internal processes, in % |<p>Average percentage of time escalator processes have been busy in the last minute</p> |INTERNAL |zabbix[process,escalator,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of history poller data collector processes, in % |<p>Average percentage of time history poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,history poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of history syncer internal processes, in % |<p>Average percentage of time history syncer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,history syncer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of housekeeper internal processes, in % |<p>Average percentage of time housekeeper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,housekeeper,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of http poller data collector processes, in % |<p>Average percentage of time http poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,http poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of icmp pinger data collector processes, in % |<p>Average percentage of time icmp pinger processes have been busy in the last minute</p> |INTERNAL |zabbix[process,icmp pinger,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of ipmi manager internal processes, in % |<p>Average percentage of time ipmi manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,ipmi manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of ipmi poller data collector processes, in % |<p>Average percentage of time ipmi poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,ipmi poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of java poller data collector processes, in % |<p>Average percentage of time java poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,java poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of LLD manager internal processes, in % |<p>Average percentage of time lld manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,lld manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of LLD worker internal processes, in % |<p>Average percentage of time lld worker processes have been busy in the last minute</p> |INTERNAL |zabbix[process,lld worker,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of poller data collector processes, in % |<p>Average percentage of time poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of preprocessing worker internal processes, in % |<p>Average percentage of time preprocessing worker processes have been busy in the last minute</p> |INTERNAL |zabbix[process,preprocessing worker,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of preprocessing manager internal processes, in % |<p>Average percentage of time preprocessing manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,preprocessing manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of proxy poller data collector processes, in % |<p>Average percentage of time proxy poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,proxy poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of report manager internal processes, in % |<p>Average percentage of time report manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,report manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of report writer internal processes, in % |<p>Average percentage of time report writer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,report writer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of self-monitoring internal processes, in % |<p>Average percentage of time self-monitoring processes have been busy in the last minute</p> |INTERNAL |zabbix[process,self-monitoring,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of snmp trapper data collector processes, in % |<p>Average percentage of time snmp trapper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,snmp trapper,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of task manager internal processes, in % |<p>Average percentage of time task manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,task manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of timer internal processes, in % |<p>Average percentage of time timer processes have been busy in the last minute</p> |INTERNAL |zabbix[process,timer,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of service manager internal processes, in % |<p>Average percentage of time service manager processes have been busy in the last minute</p> |INTERNAL |zabbix[process,service manager,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of trigger housekeeper internal processes, in % |<p>Average percentage of time trigger housekeeper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,trigger housekeeper,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of trapper data collector processes, in % |<p>Average percentage of time trapper processes have been busy in the last minute</p> |INTERNAL |zabbix[process,trapper,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of unreachable poller data collector processes, in % |<p>Average percentage of time unreachable poller processes have been busy in the last minute</p> |INTERNAL |zabbix[process,unreachable poller,avg,busy] |
+|Zabbix_server |Zabbix server: Utilization of vmware data collector processes, in % |<p>Average percentage of time vmware collector processes have been busy in the last minute</p> |INTERNAL |zabbix[process,vmware collector,avg,busy] |
+|Zabbix_server |Zabbix server: Configuration cache, % used |<p>Availability statistics of Zabbix configuration cache. Percentage of used buffer.</p> |INTERNAL |zabbix[rcache,buffer,pused] |
+|Zabbix_server |Zabbix server: Trend function cache, % unique requests |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cached items from cached items + requests. Low percentage most likely means that the cache size can be reduced.</p> |INTERNAL |zabbix[tcache,cache,pitems] |
+|Zabbix_server |Zabbix server: Trend function cache, % misses |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses.</p> |INTERNAL |zabbix[tcache,cache,pmisses] |
+|Zabbix_server |Zabbix server: Value cache, % used |<p>Availability statistics of Zabbix value cache. Percentage of used buffer.</p> |INTERNAL |zabbix[vcache,buffer,pused] |
+|Zabbix_server |Zabbix server: Value cache hits |<p>Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache).</p> |INTERNAL |zabbix[vcache,cache,hits]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Value cache misses |<p>Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database).</p> |INTERNAL |zabbix[vcache,cache,misses]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Value cache operating mode |<p>Value cache operating mode.</p> |INTERNAL |zabbix[vcache,cache,mode] |
+|Zabbix_server |Zabbix server: Version |<p>Version of Zabbix server.</p> |INTERNAL |zabbix[version]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix_server |Zabbix server: VMware cache, % used |<p>Availability statistics of Zabbix vmware cache. Percentage of used buffer.</p> |INTERNAL |zabbix[vmware,buffer,pused] |
+|Zabbix_server |Zabbix server: History write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history buffer.</p><p>History cache is used to store item values. A high number indicates performance problems on the database side.</p> |INTERNAL |zabbix[wcache,history,pused] |
+|Zabbix_server |Zabbix server: History index cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history index buffer.</p><p>History index cache is used to index values stored in history cache.</p> |INTERNAL |zabbix[wcache,index,pused] |
+|Zabbix_server |Zabbix server: Trend write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used trend buffer.</p><p>Trend cache stores aggregate for the current hour for all items that receive data.</p> |INTERNAL |zabbix[wcache,trend,pused] |
+|Zabbix_server |Zabbix server: Number of processed values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Total number of values processed by Zabbix server or Zabbix proxy, except unsupported items.</p> |INTERNAL |zabbix[wcache,values]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Number of processed numeric (float) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed float values.</p> |INTERNAL |zabbix[wcache,values,float]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Number of processed log values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed log values.</p> |INTERNAL |zabbix[wcache,values,log]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Number of processed not supported values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of times item processing resulted in item becoming unsupported or keeping that state.</p> |INTERNAL |zabbix[wcache,values,not supported]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Number of processed character values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed character/string values.</p> |INTERNAL |zabbix[wcache,values,str]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: Number of processed text values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed text values.</p> |INTERNAL |zabbix[wcache,values,text]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Zabbix server: LLD queue |<p>Count of values enqueued in the low-level discovery processing queue.</p> |INTERNAL |zabbix[lld_queue] |
+|Zabbix_server |Zabbix server: Preprocessing queue |<p>Count of values enqueued in the preprocessing queue.</p> |INTERNAL |zabbix[preprocessing_queue] |
+|Zabbix_server |Zabbix server: Number of processed numeric (unsigned) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed numeric (unsigned) values.</p> |INTERNAL |zabbix[wcache,values,uint]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Zabbix server: More than 100 items having missing data for more than 10 minutes |<p>zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes</p> |`{TEMPLATE_NAME:zabbix[queue,10m].min(10m)}>100` |WARNING | |
-|Zabbix server: Utilization of alert manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,alert manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,alert manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of alert syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,alert syncer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,alert syncer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of alerter processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,alerter,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,alerter,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of availability manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,availability manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,availability manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of configuration syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,configuration syncer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,configuration syncer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of discoverer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,discoverer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,discoverer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of escalator processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,escalator,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,escalator,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of history poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,history poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,history poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of history syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,history syncer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,history syncer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of housekeeper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,housekeeper,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,housekeeper,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of http poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,http poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,http poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of icmp pinger processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,icmp pinger,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,icmp pinger,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of ipmi manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,ipmi manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,ipmi manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of ipmi poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,ipmi poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,ipmi poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of java poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,java poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,java poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of lld manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,lld manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,lld manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of lld worker processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,lld worker,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,lld worker,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of preprocessing worker processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,preprocessing worker,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,preprocessing worker,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of preprocessing manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,preprocessing manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,preprocessing manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of proxy poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,proxy poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,proxy poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of report manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,report manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,report manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of report writer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,report writer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,report writer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of self-monitoring processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,self-monitoring,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,self-monitoring,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of snmp trapper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,snmp trapper,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,snmp trapper,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of task manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,task manager,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,task manager,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of timer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,timer,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,timer,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of trapper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,trapper,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,trapper,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of unreachable poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,unreachable poller,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,unreachable poller,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: Utilization of vmware collector processes over 75% |<p>-</p> |`{TEMPLATE_NAME:zabbix[process,vmware collector,avg,busy].avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:zabbix[process,vmware collector,avg,busy].avg(10m)}<65` |AVERAGE | |
-|Zabbix server: More than 75% used in the configuration cache |<p>Consider increasing CacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[rcache,buffer,pused].max(10m)}>75` |AVERAGE | |
-|Zabbix server: More than 95% used in the value cache |<p>Consider increasing ValueCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[vcache,buffer,pused].max(10m)}>95` |AVERAGE | |
-|Zabbix server: Zabbix value cache working in low memory mode |<p>Once the low memory mode has been switched on, the value cache will remain in this state for 24 hours, even if the problem that triggered this mode is resolved sooner.</p> |`{TEMPLATE_NAME:zabbix[vcache,cache,mode].last()}=1` |HIGH | |
-|Zabbix server: Version has changed (new version: {ITEM.VALUE}) |<p>Zabbix server version has changed. Ack to close.</p> |`{TEMPLATE_NAME:zabbix[version].diff()}=1 and {TEMPLATE_NAME:zabbix[version].strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Zabbix server: More than 75% used in the vmware cache |<p>Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[vmware,buffer,pused].max(10m)}>75` |AVERAGE | |
-|Zabbix server: More than 75% used in the history cache |<p>Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[wcache,history,pused].max(10m)}>75` |AVERAGE | |
-|Zabbix server: More than 75% used in the history index cache |<p>Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[wcache,index,pused].max(10m)}>75` |AVERAGE | |
-|Zabbix server: More than 75% used in the trends cache |<p>Consider increasing TrendCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:zabbix[wcache,trend,pused].max(10m)}>75` |AVERAGE | |
+|Cluster node [{#NODE.NAME}]: Status changed |<p>The state of the node has changed. Confirm to close.</p> |`last(/Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#1)<>last(/Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#2)` |INFO |<p>Manual close: YES</p> |
+|Zabbix server: More than 100 items having missing data for more than 10 minutes |<p>zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes.</p> |`min(/Zabbix server health/zabbix[queue,10m],10m)>100` |WARNING | |
+|Zabbix server: Utilization of alert manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,alert manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,alert manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of alert syncer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,alert syncer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,alert syncer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of alerter processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,alerter,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,alerter,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of availability manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,availability manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,availability manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of configuration syncer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,configuration syncer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,configuration syncer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of discoverer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,discoverer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,discoverer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of escalator processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,escalator,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,escalator,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of history poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,history poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,history poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of history syncer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,history syncer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,history syncer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of housekeeper processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,housekeeper,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,housekeeper,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of http poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,http poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,http poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of icmp pinger processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,icmp pinger,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,icmp pinger,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of ipmi manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,ipmi manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,ipmi manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of ipmi poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,ipmi poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,ipmi poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of java poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,java poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,java poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of lld manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,lld manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,lld manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of lld worker processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,lld worker,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,lld worker,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of preprocessing worker processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,preprocessing worker,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,preprocessing worker,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of preprocessing manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,preprocessing manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,preprocessing manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of proxy poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,proxy poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,proxy poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of report manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,report manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,report manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of report writer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,report writer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,report writer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of self-monitoring processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,self-monitoring,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,self-monitoring,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of snmp trapper processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,snmp trapper,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,snmp trapper,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of task manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,task manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,task manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of timer processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,timer,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,timer,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of service manager processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,service manager,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,service manager,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of trigger housekeeper processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,trigger housekeeper,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,trigger housekeeper,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of trapper processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,trapper,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,trapper,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of unreachable poller processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,unreachable poller,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,unreachable poller,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: Utilization of vmware collector processes over 75% |<p>-</p> |`avg(/Zabbix server health/zabbix[process,vmware collector,avg,busy],10m)>75`<p>Recovery expression:</p>`avg(/Zabbix server health/zabbix[process,vmware collector,avg,busy],10m)<65` |AVERAGE | |
+|Zabbix server: More than 75% used in the configuration cache |<p>Consider increasing CacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[rcache,buffer,pused],10m)>75` |AVERAGE | |
+|Zabbix server: More than 95% used in the value cache |<p>Consider increasing ValueCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[vcache,buffer,pused],10m)>95` |AVERAGE | |
+|Zabbix server: Zabbix value cache working in low memory mode |<p>Once the low memory mode has been switched on, the value cache will remain in this state for 24 hours, even if the problem that triggered this mode is resolved sooner.</p> |`last(/Zabbix server health/zabbix[vcache,cache,mode])=1` |HIGH | |
+|Zabbix server: Version has changed (new version: {ITEM.VALUE}) |<p>Zabbix server version has changed. Ack to close.</p> |`last(/Zabbix server health/zabbix[version],#1)<>last(/Zabbix server health/zabbix[version],#2) and length(last(/Zabbix server health/zabbix[version]))>0` |INFO |<p>Manual close: YES</p> |
+|Zabbix server: More than 75% used in the vmware cache |<p>Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[vmware,buffer,pused],10m)>75` |AVERAGE | |
+|Zabbix server: More than 75% used in the history cache |<p>Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[wcache,history,pused],10m)>75` |AVERAGE | |
+|Zabbix server: More than 75% used in the history index cache |<p>Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[wcache,index,pused],10m)>75` |AVERAGE | |
+|Zabbix server: More than 75% used in the trends cache |<p>Consider increasing TrendCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Zabbix server health/zabbix[wcache,trend,pused],10m)>75` |AVERAGE | |
## Feedback
diff --git a/templates/app/zabbix_server/template_app_zabbix_server.yaml b/templates/app/zabbix_server/template_app_zabbix_server.yaml
index 39311dc8d24..c1fd796edc0 100644
--- a/templates/app/zabbix_server/template_app_zabbix_server.yaml
+++ b/templates/app/zabbix_server/template_app_zabbix_server.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T10:31:39Z'
+ date: '2021-12-19T15:20:00Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -10,12 +10,25 @@ zabbix_export:
uuid: e2d2b4e4ac28483996cc11fe42823d57
template: 'Zabbix server health'
name: 'Zabbix server health'
- description: 'Template tooling version used: 0.38'
+ description: 'Template tooling version used: 0.40'
groups:
-
name: Templates/Applications
items:
-
+ uuid: f9d60d6dcbe14cd4aaec08aec6ca1856
+ name: 'Zabbix stats cluster'
+ type: INTERNAL
+ key: 'zabbix[cluster,discovery,nodes]'
+ history: '0'
+ trends: '0'
+ value_type: TEXT
+ description: 'Zabbix cluster statistics master item.'
+ tags:
+ -
+ tag: Application
+ value: 'Zabbix raw items'
+ -
uuid: d596a84dece14aa59ca53e9f53b21edb
name: 'Zabbix server: LLD queue'
type: INTERNAL
@@ -542,6 +555,27 @@ zabbix_export:
name: 'Zabbix server: Utilization of self-monitoring processes over 75%'
priority: AVERAGE
-
+ uuid: 4f6c2b59437c4345b348d78b5d2242db
+ name: 'Zabbix server: Utilization of service manager internal processes, in %'
+ type: INTERNAL
+ key: 'zabbix[process,service manager,avg,busy]'
+ history: 1w
+ value_type: FLOAT
+ units: '%'
+ description: 'Average percentage of time service manager processes have been busy in the last minute'
+ tags:
+ -
+ tag: Application
+ value: 'Zabbix server'
+ triggers:
+ -
+ uuid: c4cdbce205e843d6b882503961a15376
+ expression: 'avg(/Zabbix server health/zabbix[process,service manager,avg,busy],10m)>75'
+ recovery_mode: RECOVERY_EXPRESSION
+ recovery_expression: 'avg(/Zabbix server health/zabbix[process,service manager,avg,busy],10m)<65'
+ name: 'Zabbix server: Utilization of service manager processes over 75%'
+ priority: AVERAGE
+ -
uuid: cb9a7c4f3373496da97f3e0b8c978366
name: 'Zabbix server: Utilization of snmp trapper data collector processes, in %'
type: INTERNAL
@@ -626,6 +660,27 @@ zabbix_export:
name: 'Zabbix server: Utilization of trapper processes over 75%'
priority: AVERAGE
-
+ uuid: ad2199d0152d4deb8259699ebd1f3da5
+ name: 'Zabbix server: Utilization of trigger housekeeper internal processes, in %'
+ type: INTERNAL
+ key: 'zabbix[process,trigger housekeeper,avg,busy]'
+ history: 1w
+ value_type: FLOAT
+ units: '%'
+ description: 'Average percentage of time trigger housekeeper processes have been busy in the last minute'
+ tags:
+ -
+ tag: Application
+ value: 'Zabbix server'
+ triggers:
+ -
+ uuid: 5ffa02f393d34c49bcb9eed2b2b89704
+ expression: 'avg(/Zabbix server health/zabbix[process,trigger housekeeper,avg,busy],10m)>75'
+ recovery_mode: RECOVERY_EXPRESSION
+ recovery_expression: 'avg(/Zabbix server health/zabbix[process,trigger housekeeper,avg,busy],10m)<65'
+ name: 'Zabbix server: Utilization of trigger housekeeper processes over 75%'
+ priority: AVERAGE
+ -
uuid: 4fd24bdb8935435e8617c9607b9c6ca1
name: 'Zabbix server: Utilization of unreachable poller data collector processes, in %'
type: INTERNAL
@@ -673,7 +728,7 @@ zabbix_export:
type: INTERNAL
key: 'zabbix[queue,10m]'
history: 1w
- description: 'Number of monitored items in the queue which are delayed at least by 10 minutes'
+ description: 'Number of monitored items in the queue which are delayed at least by 10 minutes.'
tags:
-
tag: Application
@@ -684,14 +739,14 @@ zabbix_export:
expression: 'min(/Zabbix server health/zabbix[queue,10m],10m)>100'
name: 'Zabbix server: More than 100 items having missing data for more than 10 minutes'
priority: WARNING
- description: 'zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes'
+ description: 'zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes.'
-
uuid: aaf58555c0774e848f51a351b6ae8462
name: 'Zabbix server: Queue'
type: INTERNAL
key: 'zabbix[queue]'
history: 1w
- description: 'Number of monitored items in the queue which are delayed at least by 6 seconds'
+ description: 'Number of monitored items in the queue which are delayed at least by 6 seconds.'
tags:
-
tag: Application
@@ -704,7 +759,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: 'Availability statistics of Zabbix configuration cache. Percentage of used buffer'
+ description: 'Availability statistics of Zabbix configuration cache. Percentage of used buffer.'
tags:
-
tag: Application
@@ -715,7 +770,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[rcache,buffer,pused],10m)>75'
name: 'Zabbix server: More than 75% used in the configuration cache'
priority: AVERAGE
- description: 'Consider increasing CacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing CacheSize in the zabbix_server.conf configuration file.'
-
uuid: b14a5ba6c93f4d6c9dfd820640d34e9b
name: 'Zabbix server: Trend function cache, % unique requests'
@@ -737,7 +792,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: "Effectiveness statistics of the Zabbix trend function cache.\tPercentage of cache misses"
+ description: 'Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses.'
tags:
-
tag: Application
@@ -750,7 +805,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: "Availability statistics of Zabbix value cache.\tPercentage of used buffer"
+ description: 'Availability statistics of Zabbix value cache. Percentage of used buffer.'
tags:
-
tag: Application
@@ -761,7 +816,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[vcache,buffer,pused],10m)>95'
name: 'Zabbix server: More than 95% used in the value cache'
priority: AVERAGE
- description: 'Consider increasing ValueCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing ValueCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 69b14487a9744dbb866f2e6ee131d0e5
name: 'Zabbix server: Value cache hits'
@@ -770,7 +825,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: vps
- description: 'Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache)'
+ description: 'Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache).'
preprocessing:
-
type: CHANGE_PER_SECOND
@@ -788,7 +843,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: vps
- description: 'Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database)'
+ description: 'Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database).'
preprocessing:
-
type: CHANGE_PER_SECOND
@@ -804,7 +859,7 @@ zabbix_export:
type: INTERNAL
key: 'zabbix[vcache,cache,mode]'
history: 1w
- description: 'Value cache operating mode'
+ description: 'Value cache operating mode.'
valuemap:
name: 'Value cache operating mode'
tags:
@@ -852,7 +907,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: 'Availability statistics of Zabbix vmware cache. Percentage of used buffer'
+ description: 'Availability statistics of Zabbix vmware cache. Percentage of used buffer.'
tags:
-
tag: Application
@@ -863,7 +918,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[vmware,buffer,pused],10m)>75'
name: 'Zabbix server: More than 75% used in the vmware cache'
priority: AVERAGE
- description: 'Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 1ae8253bb4da434da3ea6bcb0352115d
name: 'Zabbix server: History write cache, % used'
@@ -885,7 +940,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[wcache,history,pused],10m)>75'
name: 'Zabbix server: More than 75% used in the history cache'
priority: AVERAGE
- description: 'Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 9fc7faf5455e4e78bdb1406f947bfa08
name: 'Zabbix server: History index cache, % used'
@@ -907,7 +962,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[wcache,index,pused],10m)>75'
name: 'Zabbix server: More than 75% used in the history index cache'
priority: AVERAGE
- description: 'Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 5cca0fc9878d4e068467d5a3b8558ed5
name: 'Zabbix server: Trend write cache, % used'
@@ -929,7 +984,7 @@ zabbix_export:
expression: 'max(/Zabbix server health/zabbix[wcache,trend,pused],10m)>75'
name: 'Zabbix server: More than 75% used in the trends cache'
priority: AVERAGE
- description: 'Consider increasing TrendCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing TrendCacheSize in the zabbix_server.conf configuration file.'
-
uuid: de63f78f36e74361b6c9993d12d8b7c8
name: 'Zabbix server: Number of processed numeric (float) values per second'
@@ -1063,6 +1118,123 @@ zabbix_export:
-
tag: Application
value: 'Zabbix server'
+ discovery_rules:
+ -
+ uuid: 32bf5c3b8f1f4d10b26806ae0d2adb25
+ name: 'High availability cluster node discovery'
+ type: DEPENDENT
+ key: zabbix.nodes.discovery
+ delay: '0'
+ description: 'LLD rule with item and trigger prototypes for node discovery.'
+ item_prototypes:
+ -
+ uuid: c5cb7ffbd314494cb1801ae804e9c76e
+ name: 'Cluster node [{#NODE.NAME}]: Address'
+ type: DEPENDENT
+ key: 'zabbix.nodes.address[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ trends: '0'
+ value_type: CHAR
+ description: 'Node IPv4 address.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[?(@.id=="{#NODE.ID}")].address.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ master_item:
+ key: 'zabbix[cluster,discovery,nodes]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: f444de7cdd7a4f3bb7f4b1f2e86e16d7
+ name: 'Cluster node [{#NODE.NAME}]: Last access age'
+ type: DEPENDENT
+ key: 'zabbix.nodes.lastaccess.age[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ units: uptime
+ description: 'Time between database unix_timestamp() and last access time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[?(@.id=="{#NODE.ID}")].lastaccess_age.first()'
+ master_item:
+ key: 'zabbix[cluster,discovery,nodes]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: 2e33f747803b45d7a9b94c171bf36f61
+ name: 'Cluster node [{#NODE.NAME}]: Last access time'
+ type: DEPENDENT
+ key: 'zabbix.nodes.lastaccess.time[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ units: unixtime
+ description: 'Last access time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[?(@.id=="{#NODE.ID}")].lastaccess.first()'
+ master_item:
+ key: 'zabbix[cluster,discovery,nodes]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: 1e4de6a913764a22862c0265d329b592
+ name: 'Cluster node [{#NODE.NAME}]: Status'
+ type: DEPENDENT
+ key: 'zabbix.nodes.status[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ description: 'Cluster node status.'
+ valuemap:
+ name: 'Cluster node status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[?(@.id=="{#NODE.ID}")].status.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ master_item:
+ key: 'zabbix[cluster,discovery,nodes]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ trigger_prototypes:
+ -
+ uuid: 8d48978fb5724d19a950aa6eb8a3dd75
+ expression: 'last(/Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#1)<>last(/Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#2)'
+ name: 'Cluster node [{#NODE.NAME}]: Status changed'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: INFO
+ description: 'The state of the node has changed. Confirm to close.'
+ manual_close: 'YES'
+ master_item:
+ key: 'zabbix[cluster,discovery,nodes]'
+ lld_macro_paths:
+ -
+ lld_macro: '{#NODE.ID}'
+ path: $.id
+ -
+ lld_macro: '{#NODE.NAME}'
+ path: $.name
dashboards:
-
uuid: 6815a90fb9b745fd8f1b1f47697893d6
@@ -1239,6 +1411,22 @@ zabbix_export:
host: 'Zabbix server health'
valuemaps:
-
+ uuid: cee1681b12ff49b6b7d442a851e870b4
+ name: 'Cluster node status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Standby
+ -
+ value: '1'
+ newvalue: Stopped
+ -
+ value: '2'
+ newvalue: Unavailable
+ -
+ value: '3'
+ newvalue: Active
+ -
uuid: 97bbea700550483bbd8405b4dd9d8005
name: 'Value cache operating mode'
mappings:
@@ -1709,6 +1897,18 @@ zabbix_export:
item:
host: 'Zabbix server health'
key: 'zabbix[process,report writer,avg,busy]'
+ -
+ sortorder: '18'
+ color: 790E1F
+ item:
+ host: 'Zabbix server health'
+ key: 'zabbix[process,service manager,avg,busy]'
+ -
+ sortorder: '19'
+ color: 87AC4D
+ item:
+ host: 'Zabbix server health'
+ key: 'zabbix[process,trigger housekeeper,avg,busy]'
-
uuid: b053dc416ee3422b94e46fb415ea87e6
name: 'Zabbix server: Zabbix internal queues'
diff --git a/templates/app/zabbix_server_remote/README.md b/templates/app/zabbix_server_remote/README.md
index e928c057618..71c7d54607f 100644
--- a/templates/app/zabbix_server_remote/README.md
+++ b/templates/app/zabbix_server_remote/README.md
@@ -26,109 +26,121 @@ There are no template links in this template.
## Discovery rules
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|High availability cluster node discovery |<p>LLD rule with item and trigger prototypes for node discovery.</p> |DEPENDENT |zabbix.nodes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.ha`</p> |
## Items collected
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
+|Cluster |Cluster node [{#NODE.NAME}]: Address |<p>Node IPv4 address.</p> |DEPENDENT |zabbix.nodes.address[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.ha[?(@.id=="{#NODE.ID}")].address.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Last access time |<p>Last access time.</p> |DEPENDENT |zabbix.nodes.lastaccess.time[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.ha[?(@.id=="{#NODE.ID}")].lastaccess.first()`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Last access age |<p>Time between database unix_timestamp() and last access time.</p> |DEPENDENT |zabbix.nodes.lastaccess.age[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.ha[?(@.id=="{#NODE.ID}")].lastaccess_age.first()`</p> |
+|Cluster |Cluster node [{#NODE.NAME}]: Status |<p>Node status.</p> |DEPENDENT |zabbix.nodes.status[{#NODE.ID}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.ha[?(@.id=="{#NODE.ID}")].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p> |
|Zabbix_raw_items |Remote Zabbix server: Zabbix stats |<p>Zabbix server statistics master item.</p> |INTERNAL |zabbix[stats,{$ADDRESS},{$PORT}] |
-|Zabbix server |Remote Zabbix server: Zabbix stats queue over 10m |<p>Number of monitored items in the queue which are delayed at least by 10 minutes</p> |INTERNAL |zabbix[stats,{$ADDRESS},{$PORT},queue,10m]<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue`</p> |
-|Zabbix server |Remote Zabbix server: Zabbix stats queue |<p>Number of monitored items in the queue which are delayed at least by 6 seconds</p> |INTERNAL |zabbix[stats,{$ADDRESS},{$PORT},queue]<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of alert manager internal processes, in % |<p>Average percentage of time alert manager processes have been busy in the last minute</p> |DEPENDENT |process.alert_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alert manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alert manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of alert syncer internal processes, in % |<p>Average percentage of time alert syncer processes have been busy in the last minute</p> |DEPENDENT |process.alert_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alert syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alert syncer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of alerter internal processes, in % |<p>Average percentage of time alerter processes have been busy in the last minute</p> |DEPENDENT |process.alerter.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alerter'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alerter not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of availability manager internal processes, in % |<p>Average percentage of time availability manager processes have been busy in the last minute</p> |DEPENDENT |process.availability_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['availability manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes availability manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of configuration syncer internal processes, in % |<p>Average percentage of time configuration syncer processes have been busy in the last minute</p> |DEPENDENT |process.configuration_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['configuration syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes configuration syncer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of discoverer data collector processes, in % |<p>Average percentage of time discoverer processes have been busy in the last minute</p> |DEPENDENT |process.discoverer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['discoverer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes discoverer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of escalator internal processes, in % |<p>Average percentage of time escalator processes have been busy in the last minute</p> |DEPENDENT |process.escalator.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['escalator'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes escalator not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of history poller data collector processes, in % |<p>Average percentage of time history poller processes have been busy in the last minute</p> |DEPENDENT |process.history_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['history poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes history poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of history syncer internal processes, in % |<p>Average percentage of time history syncer processes have been busy in the last minute</p> |DEPENDENT |process.history_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['history syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes history syncer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of housekeeper internal processes, in % |<p>Average percentage of time housekeeper processes have been busy in the last minute</p> |DEPENDENT |process.housekeeper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['housekeeper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes housekeeper not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of http poller data collector processes, in % |<p>Average percentage of time http poller processes have been busy in the last minute</p> |DEPENDENT |process.http_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['http poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes http poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of icmp pinger data collector processes, in % |<p>Average percentage of time icmp pinger processes have been busy in the last minute</p> |DEPENDENT |process.icmp_pinger.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['icmp pinger'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes icmp pinger not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of ipmi manager internal processes, in % |<p>Average percentage of time ipmi manager processes have been busy in the last minute</p> |DEPENDENT |process.ipmi_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['ipmi manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes ipmi manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of ipmi poller data collector processes, in % |<p>Average percentage of time ipmi poller processes have been busy in the last minute</p> |DEPENDENT |process.ipmi_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['ipmi poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes ipmi poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of java poller data collector processes, in % |<p>Average percentage of time java poller processes have been busy in the last minute</p> |DEPENDENT |process.java_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['java poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes java poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of LLD manager internal processes, in % |<p>Average percentage of time lld manager processes have been busy in the last minute</p> |DEPENDENT |process.lld_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['lld manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes LLD manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of LLD worker internal processes, in % |<p>Average percentage of time lld worker processes have been busy in the last minute</p> |DEPENDENT |process.lld_worker.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['lld worker'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes LLD worker not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of poller data collector processes, in % |<p>Average percentage of time poller processes have been busy in the last minute</p> |DEPENDENT |process.poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of preprocessing worker internal processes, in % |<p>Average percentage of time preprocessing worker processes have been busy in the last minute</p> |DEPENDENT |process.preprocessing_worker.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['preprocessing worker'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes preprocessing worker not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of preprocessing manager internal processes, in % |<p>Average percentage of time preprocessing manager processes have been busy in the last minute</p> |DEPENDENT |process.preprocessing_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['preprocessing manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes preprocessing manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of proxy poller data collector processes, in % |<p>Average percentage of time proxy poller processes have been busy in the last minute</p> |DEPENDENT |process.proxy_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['proxy poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes proxy poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of report manager internal processes, in % |<p>Average percentage of time report manager processes have been busy in the last minute</p> |DEPENDENT |process.report_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['report manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes report manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of report writer internal processes, in % |<p>Average percentage of time report writer processes have been busy in the last minute</p> |DEPENDENT |process.report_writer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['report writer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes report writer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of self-monitoring internal processes, in % |<p>Average percentage of time self-monitoring processes have been busy in the last minute</p> |DEPENDENT |process.self-monitoring.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['self-monitoring'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes self-monitoring not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of snmp trapper data collector processes, in % |<p>Average percentage of time snmp trapper processes have been busy in the last minute</p> |DEPENDENT |process.snmp_trapper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['snmp trapper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes snmp trapper not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of task manager internal processes, in % |<p>Average percentage of time task manager processes have been busy in the last minute</p> |DEPENDENT |process.task_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['task manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes task manager not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of timer internal processes, in % |<p>Average percentage of time timer processes have been busy in the last minute</p> |DEPENDENT |process.timer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['timer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes timer not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of trapper data collector processes, in % |<p>Average percentage of time trapper processes have been busy in the last minute</p> |DEPENDENT |process.trapper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['trapper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes trapper not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of unreachable poller data collector processes, in % |<p>Average percentage of time unreachable poller processes have been busy in the last minute</p> |DEPENDENT |process.unreachable_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['unreachable poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes unreachable poller not started`</p> |
-|Zabbix server |Remote Zabbix server: Utilization of vmware data collector processes, in % |<p>Average percentage of time vmware collector processes have been busy in the last minute</p> |DEPENDENT |process.vmware_collector.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['vmware collector'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes vmware collector not started`</p> |
-|Zabbix server |Remote Zabbix server: Configuration cache, % used |<p>Availability statistics of Zabbix configuration cache. Percentage of used buffer</p> |DEPENDENT |rcache.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.rcache.pused`</p> |
-|Zabbix server |Remote Zabbix server: Trend function cache, % unique requests |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cached items from cached items + requests. Low percentage most likely means that the cache size can be reduced.</p> |DEPENDENT |tcache.pitems<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.tcache.pitems`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Not supported this version`</p> |
-|Zabbix server |Remote Zabbix server: Trend function cache, % misses |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses</p> |DEPENDENT |tcache.pmisses<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.tcache.pmisses`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Not supported this version`</p> |
-|Zabbix server |Remote Zabbix server: Value cache, % used |<p>Availability statistics of Zabbix value cache. Percentage of used buffer</p> |DEPENDENT |vcache.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.buffer.pused`</p> |
-|Zabbix server |Remote Zabbix server: Value cache hits |<p>Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache)</p> |DEPENDENT |vcache.cache.hits<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.hits`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Value cache misses |<p>Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database)</p> |DEPENDENT |vcache.cache.misses<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.misses`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Value cache operating mode |<p>Value cache operating mode</p> |DEPENDENT |vcache.cache.mode<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.mode`</p> |
-|Zabbix server |Remote Zabbix server: Version |<p>Version of Zabbix server.</p> |DEPENDENT |version<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
-|Zabbix server |Remote Zabbix server: VMware cache, % used |<p>Availability statistics of Zabbix vmware cache. Percentage of used buffer</p> |DEPENDENT |vmware.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vmware.pused`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> No vmware collector processes started`</p> |
-|Zabbix server |Remote Zabbix server: History write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history buffer.</p><p>History cache is used to store item values. A high number indicates performance problems on the database side.</p> |DEPENDENT |wcache.history.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.history.pused`</p> |
-|Zabbix server |Remote Zabbix server: History index cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history index buffer.</p><p>History index cache is used to index values stored in history cache.</p> |DEPENDENT |wcache.index.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.index.pused`</p> |
-|Zabbix server |Remote Zabbix server: Trend write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used trend buffer.</p><p>Trend cache stores aggregate for the current hour for all items that receive data.</p> |DEPENDENT |wcache.trend.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.trend.pused`</p> |
-|Zabbix server |Remote Zabbix server: Number of processed values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Total number of values processed by Zabbix server or Zabbix proxy, except unsupported items.</p> |DEPENDENT |wcache.values<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.all`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Number of processed numeric (float) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed float values.</p> |DEPENDENT |wcache.values.float<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.float`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Number of processed log values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed log values.</p> |DEPENDENT |wcache.values.log<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.log`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Number of processed not supported values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of times item processing resulted in item becoming unsupported or keeping that state.</p> |DEPENDENT |wcache.values.not_supported<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values['not supported']`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Number of processed character values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed character/string values.</p> |DEPENDENT |wcache.values.str<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.str`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: Number of processed text values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed text values.</p> |DEPENDENT |wcache.values.text<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.text`</p><p>- CHANGE_PER_SECOND |
-|Zabbix server |Remote Zabbix server: LLD queue |<p>Count of values enqueued in the low-level discovery processing queue.</p> |DEPENDENT |lld_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.lld_queue`</p> |
-|Zabbix server |Remote Zabbix server: Preprocessing queue |<p>Count of values enqueued in the preprocessing queue.</p> |DEPENDENT |preprocessing_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.preprocessing_queue`</p> |
-|Zabbix server |Remote Zabbix server: Number of processed numeric (unsigned) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed numeric (unsigned) values.</p> |DEPENDENT |wcache.values.uint<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.uint`</p><p>- CHANGE_PER_SECOND |
+|Zabbix_server |Remote Zabbix server: Zabbix stats queue over 10m |<p>Number of monitored items in the queue which are delayed at least by 10 minutes.</p> |INTERNAL |zabbix[stats,{$ADDRESS},{$PORT},queue,10m]<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue`</p> |
+|Zabbix_server |Remote Zabbix server: Zabbix stats queue |<p>Number of monitored items in the queue which are delayed at least by 6 seconds.</p> |INTERNAL |zabbix[stats,{$ADDRESS},{$PORT},queue]<p>**Preprocessing**:</p><p>- JSONPATH: `$.queue`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of alert manager internal processes, in % |<p>Average percentage of time alert manager processes have been busy in the last minute</p> |DEPENDENT |process.alert_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alert manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alert manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of alert syncer internal processes, in % |<p>Average percentage of time alert syncer processes have been busy in the last minute</p> |DEPENDENT |process.alert_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alert syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alert syncer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of alerter internal processes, in % |<p>Average percentage of time alerter processes have been busy in the last minute</p> |DEPENDENT |process.alerter.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['alerter'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes alerter not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of availability manager internal processes, in % |<p>Average percentage of time availability manager processes have been busy in the last minute</p> |DEPENDENT |process.availability_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['availability manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes availability manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of configuration syncer internal processes, in % |<p>Average percentage of time configuration syncer processes have been busy in the last minute</p> |DEPENDENT |process.configuration_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['configuration syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes configuration syncer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of discoverer data collector processes, in % |<p>Average percentage of time discoverer processes have been busy in the last minute</p> |DEPENDENT |process.discoverer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['discoverer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes discoverer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of escalator internal processes, in % |<p>Average percentage of time escalator processes have been busy in the last minute</p> |DEPENDENT |process.escalator.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['escalator'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes escalator not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of history poller data collector processes, in % |<p>Average percentage of time history poller processes have been busy in the last minute</p> |DEPENDENT |process.history_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['history poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes history poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of history syncer internal processes, in % |<p>Average percentage of time history syncer processes have been busy in the last minute</p> |DEPENDENT |process.history_syncer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['history syncer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes history syncer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of housekeeper internal processes, in % |<p>Average percentage of time housekeeper processes have been busy in the last minute</p> |DEPENDENT |process.housekeeper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['housekeeper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes housekeeper not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of http poller data collector processes, in % |<p>Average percentage of time http poller processes have been busy in the last minute</p> |DEPENDENT |process.http_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['http poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes http poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of icmp pinger data collector processes, in % |<p>Average percentage of time icmp pinger processes have been busy in the last minute</p> |DEPENDENT |process.icmp_pinger.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['icmp pinger'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes icmp pinger not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of ipmi manager internal processes, in % |<p>Average percentage of time ipmi manager processes have been busy in the last minute</p> |DEPENDENT |process.ipmi_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['ipmi manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes ipmi manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of ipmi poller data collector processes, in % |<p>Average percentage of time ipmi poller processes have been busy in the last minute</p> |DEPENDENT |process.ipmi_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['ipmi poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes ipmi poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of java poller data collector processes, in % |<p>Average percentage of time java poller processes have been busy in the last minute</p> |DEPENDENT |process.java_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['java poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes java poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of LLD manager internal processes, in % |<p>Average percentage of time lld manager processes have been busy in the last minute</p> |DEPENDENT |process.lld_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['lld manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes LLD manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of LLD worker internal processes, in % |<p>Average percentage of time lld worker processes have been busy in the last minute</p> |DEPENDENT |process.lld_worker.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['lld worker'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes LLD worker not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of poller data collector processes, in % |<p>Average percentage of time poller processes have been busy in the last minute</p> |DEPENDENT |process.poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of preprocessing worker internal processes, in % |<p>Average percentage of time preprocessing worker processes have been busy in the last minute</p> |DEPENDENT |process.preprocessing_worker.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['preprocessing worker'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes preprocessing worker not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of preprocessing manager internal processes, in % |<p>Average percentage of time preprocessing manager processes have been busy in the last minute</p> |DEPENDENT |process.preprocessing_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['preprocessing manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes preprocessing manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of proxy poller data collector processes, in % |<p>Average percentage of time proxy poller processes have been busy in the last minute</p> |DEPENDENT |process.proxy_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['proxy poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes proxy poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of report manager internal processes, in % |<p>Average percentage of time report manager processes have been busy in the last minute</p> |DEPENDENT |process.report_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['report manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes report manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of report writer internal processes, in % |<p>Average percentage of time report writer processes have been busy in the last minute</p> |DEPENDENT |process.report_writer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['report writer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes report writer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of self-monitoring internal processes, in % |<p>Average percentage of time self-monitoring processes have been busy in the last minute</p> |DEPENDENT |process.self-monitoring.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['self-monitoring'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes self-monitoring not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of snmp trapper data collector processes, in % |<p>Average percentage of time snmp trapper processes have been busy in the last minute</p> |DEPENDENT |process.snmp_trapper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['snmp trapper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes snmp trapper not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of task manager internal processes, in % |<p>Average percentage of time task manager processes have been busy in the last minute</p> |DEPENDENT |process.task_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['task manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes task manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of timer internal processes, in % |<p>Average percentage of time timer processes have been busy in the last minute</p> |DEPENDENT |process.timer.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['timer'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes timer not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of service manager internal processes, in % |<p>Average percentage of time service manager processes have been busy in the last minute</p> |DEPENDENT |process.service_manager.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['service manager'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes service manager not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of trigger housekeeper internal processes, in % |<p>Average percentage of time trigger housekeeper processes have been busy in the last minute</p> |DEPENDENT |process.trigger_housekeeper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['trigger housekeeper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes trigger housekeeper not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of trapper data collector processes, in % |<p>Average percentage of time trapper processes have been busy in the last minute</p> |DEPENDENT |process.trapper.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['trapper'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes trapper not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of unreachable poller data collector processes, in % |<p>Average percentage of time unreachable poller processes have been busy in the last minute</p> |DEPENDENT |process.unreachable_poller.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['unreachable poller'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes unreachable poller not started`</p> |
+|Zabbix_server |Remote Zabbix server: Utilization of vmware data collector processes, in % |<p>Average percentage of time vmware collector processes have been busy in the last minute</p> |DEPENDENT |process.vmware_collector.avg.busy<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.process['vmware collector'].busy.avg`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Processes vmware collector not started`</p> |
+|Zabbix_server |Remote Zabbix server: Configuration cache, % used |<p>Availability statistics of Zabbix configuration cache. Percentage of used buffer.</p> |DEPENDENT |rcache.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.rcache.pused`</p> |
+|Zabbix_server |Remote Zabbix server: Trend function cache, % unique requests |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage</p><p>of cached items from cached items + requests. Low percentage most likely means</p><p>that the cache size can be reduced.</p> |DEPENDENT |tcache.pitems<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.tcache.pitems`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Not supported this version`</p> |
+|Zabbix_server |Remote Zabbix server: Trend function cache, % misses |<p>Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses.</p> |DEPENDENT |tcache.pmisses<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.tcache.pmisses`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> Not supported this version`</p> |
+|Zabbix_server |Remote Zabbix server: Value cache, % used |<p>Availability statistics of Zabbix value cache. Percentage of used buffer.</p> |DEPENDENT |vcache.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.buffer.pused`</p> |
+|Zabbix_server |Remote Zabbix server: Value cache hits |<p>Effectiveness statistics of Zabbix value cache. Number of cache hits</p><p>(history values taken from the cache).</p> |DEPENDENT |vcache.cache.hits<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.hits`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Value cache misses |<p>Effectiveness statistics of Zabbix value cache. Number of cache misses</p><p>(history values taken from the database).</p> |DEPENDENT |vcache.cache.misses<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.misses`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Value cache operating mode |<p>Value cache operating mode.</p> |DEPENDENT |vcache.cache.mode<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vcache.cache.mode`</p> |
+|Zabbix_server |Remote Zabbix server: Version |<p>Version of Zabbix server.</p> |DEPENDENT |version<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.version`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix_server |Remote Zabbix server: VMware cache, % used |<p>Availability statistics of Zabbix vmware cache. Percentage of used buffer.</p> |DEPENDENT |vmware.buffer.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.vmware.pused`</p><p>⛔️ON_FAIL: `CUSTOM_ERROR -> No vmware collector processes started`</p> |
+|Zabbix_server |Remote Zabbix server: History write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history buffer.</p><p>History cache is used to store item values. A high number indicates performance problems on the database side.</p> |DEPENDENT |wcache.history.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.history.pused`</p> |
+|Zabbix_server |Remote Zabbix server: History index cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used history index buffer.</p><p>History index cache is used to index values stored in history cache.</p> |DEPENDENT |wcache.index.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.index.pused`</p> |
+|Zabbix_server |Remote Zabbix server: Trend write cache, % used |<p>Statistics and availability of Zabbix write cache. Percentage of used trend buffer.</p><p>Trend cache stores aggregate for the current hour for all items that receive data.</p> |DEPENDENT |wcache.trend.pused<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.trend.pused`</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Total number of values processed by Zabbix server or Zabbix proxy, except unsupported items.</p> |DEPENDENT |wcache.values<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.all`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed numeric (float) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed float values.</p> |DEPENDENT |wcache.values.float<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.float`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed log values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed log values.</p> |DEPENDENT |wcache.values.log<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.log`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed not supported values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of times item processing resulted in item becoming unsupported or keeping that state.</p> |DEPENDENT |wcache.values.not_supported<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values['not supported']`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed character values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed character/string values.</p> |DEPENDENT |wcache.values.str<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.str`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed text values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed text values.</p> |DEPENDENT |wcache.values.text<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.text`</p><p>- CHANGE_PER_SECOND</p> |
+|Zabbix_server |Remote Zabbix server: LLD queue |<p>Count of values enqueued in the low-level discovery processing queue.</p> |DEPENDENT |lld_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.lld_queue`</p> |
+|Zabbix_server |Remote Zabbix server: Preprocessing queue |<p>Count of values enqueued in the preprocessing queue.</p> |DEPENDENT |preprocessing_queue<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.preprocessing_queue`</p> |
+|Zabbix_server |Remote Zabbix server: Number of processed numeric (unsigned) values per second |<p>Statistics and availability of Zabbix write cache.</p><p>Number of processed numeric (unsigned) values.</p> |DEPENDENT |wcache.values.uint<p>**Preprocessing**:</p><p>- JSONPATH: `$.data.wcache.values.uint`</p><p>- CHANGE_PER_SECOND</p> |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|Remote Zabbix server: More than 100 items having missing data for more than 10 minutes |<p>zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes</p> |`{TEMPLATE_NAME:zabbix[stats,{$ADDRESS},{$PORT},queue,10m].min(10m)}>100` |WARNING | |
-|Remote Zabbix server: Utilization of alert manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.alert_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.alert_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of alert syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.alert_syncer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.alert_syncer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of alerter processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.alerter.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.alerter.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of availability manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.availability_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.availability_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of configuration syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.configuration_syncer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.configuration_syncer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of discoverer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.discoverer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.discoverer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of escalator processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.escalator.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.escalator.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of history poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.history_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.history_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of history syncer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.history_syncer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.history_syncer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of housekeeper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.housekeeper.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.housekeeper.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of http poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.http_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.http_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of icmp pinger processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.icmp_pinger.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.icmp_pinger.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of ipmi manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.ipmi_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.ipmi_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of ipmi poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.ipmi_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.ipmi_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of java poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.java_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.java_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of lld manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.lld_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.lld_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of lld worker processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.lld_worker.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.lld_worker.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of preprocessing worker processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.preprocessing_worker.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.preprocessing_worker.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of preprocessing manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.preprocessing_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.preprocessing_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of proxy poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.proxy_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.proxy_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of report manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.report_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.report_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of report writer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.report_writer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.report_writer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of self-monitoring processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.self-monitoring.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.self-monitoring.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of snmp trapper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.snmp_trapper.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.snmp_trapper.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of task manager processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.task_manager.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.task_manager.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of timer processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.timer.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.timer.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of trapper processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.trapper.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.trapper.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of unreachable poller processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.unreachable_poller.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.unreachable_poller.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: Utilization of vmware collector processes over 75% |<p>-</p> |`{TEMPLATE_NAME:process.vmware_collector.avg.busy.avg(10m)}>75`<p>Recovery expression:</p>`{TEMPLATE_NAME:process.vmware_collector.avg.busy.avg(10m)}<65` |AVERAGE | |
-|Remote Zabbix server: More than 75% used in the configuration cache |<p>Consider increasing CacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:rcache.buffer.pused.max(10m)}>75` |AVERAGE | |
-|Remote Zabbix server: More than 95% used in the value cache |<p>Consider increasing ValueCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:vcache.buffer.pused.max(10m)}>95` |AVERAGE | |
-|Remote Zabbix server: Zabbix value cache working in low memory mode |<p>Once the low memory mode has been switched on, the value cache will remain in this state for 24 hours, even if the problem that triggered this mode is resolved sooner.</p> |`{TEMPLATE_NAME:vcache.cache.mode.last()}=1` |HIGH | |
-|Remote Zabbix server: Version has changed (new version: {ITEM.VALUE}) |<p>Remote Zabbix server version has changed. Ack to close.</p> |`{TEMPLATE_NAME:version.diff()}=1 and {TEMPLATE_NAME:version.strlen()}>0` |INFO |<p>Manual close: YES</p> |
-|Remote Zabbix server: More than 75% used in the vmware cache |<p>Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:vmware.buffer.pused.max(10m)}>75` |AVERAGE | |
-|Remote Zabbix server: More than 75% used in the history cache |<p>Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:wcache.history.pused.max(10m)}>75` |AVERAGE | |
-|Remote Zabbix server: More than 75% used in the history index cache |<p>Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:wcache.index.pused.max(10m)}>75` |AVERAGE | |
-|Remote Zabbix server: More than 75% used in the trends cache |<p>Consider increasing TrendCacheSize in the zabbix_server.conf configuration file</p> |`{TEMPLATE_NAME:wcache.trend.pused.max(10m)}>75` |AVERAGE | |
+|Cluster node [{#NODE.NAME}]: Status changed |<p>The state of the node has changed. Confirm to close.</p> |`last(/Remote Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#1)<>last(/Remote Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#2)` |INFO |<p>Manual close: YES</p> |
+|Remote Zabbix server: More than 100 items having missing data for more than 10 minutes |<p>zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about</p><p>how many items are missing data for more than 10 minutes.</p> |`min(/Remote Zabbix server health/zabbix[stats,{$ADDRESS},{$PORT},queue,10m],10m)>100` |WARNING | |
+|Remote Zabbix server: Utilization of alert manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.alert_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.alert_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of alert syncer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.alert_syncer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.alert_syncer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of alerter processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.alerter.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.alerter.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of availability manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.availability_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.availability_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of configuration syncer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.configuration_syncer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.configuration_syncer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of discoverer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.discoverer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.discoverer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of escalator processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.escalator.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.escalator.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of history poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.history_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.history_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of history syncer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.history_syncer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.history_syncer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of housekeeper processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.housekeeper.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.housekeeper.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of http poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.http_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.http_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of icmp pinger processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.icmp_pinger.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.icmp_pinger.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of ipmi manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.ipmi_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.ipmi_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of ipmi poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.ipmi_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.ipmi_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of java poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.java_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.java_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of lld manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.lld_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.lld_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of lld worker processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.lld_worker.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.lld_worker.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of preprocessing worker processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.preprocessing_worker.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.preprocessing_worker.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of preprocessing manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.preprocessing_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.preprocessing_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of proxy poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.proxy_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.proxy_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of report manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.report_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.report_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of report writer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.report_writer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.report_writer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of self-monitoring processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.self-monitoring.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.self-monitoring.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of snmp trapper processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.snmp_trapper.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.snmp_trapper.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of task manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.task_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.task_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of timer processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.timer.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.timer.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of service manager processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.service_manager.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.service_manager.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of trigger housekeeper processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.trigger_housekeeper.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.trigger_housekeeper.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of trapper processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.trapper.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.trapper.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of unreachable poller processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.unreachable_poller.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.unreachable_poller.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: Utilization of vmware collector processes over 75% |<p>-</p> |`avg(/Remote Zabbix server health/process.vmware_collector.avg.busy,10m)>75`<p>Recovery expression:</p>`avg(/Remote Zabbix server health/process.vmware_collector.avg.busy,10m)<65` |AVERAGE | |
+|Remote Zabbix server: More than 75% used in the configuration cache |<p>Consider increasing CacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/rcache.buffer.pused,10m)>75` |AVERAGE | |
+|Remote Zabbix server: More than 95% used in the value cache |<p>Consider increasing ValueCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/vcache.buffer.pused,10m)>95` |AVERAGE | |
+|Remote Zabbix server: Zabbix value cache working in low memory mode |<p>Once the low memory mode has been switched on, the value cache will remain in this state for 24 hours, even if the problem that triggered this mode is resolved sooner.</p> |`last(/Remote Zabbix server health/vcache.cache.mode)=1` |HIGH | |
+|Remote Zabbix server: Version has changed (new version: {ITEM.VALUE}) |<p>Remote Zabbix server version has changed. Ack to close.</p> |`last(/Remote Zabbix server health/version,#1)<>last(/Remote Zabbix server health/version,#2) and length(last(/Remote Zabbix server health/version))>0` |INFO |<p>Manual close: YES</p> |
+|Remote Zabbix server: More than 75% used in the vmware cache |<p>Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/vmware.buffer.pused,10m)>75` |AVERAGE | |
+|Remote Zabbix server: More than 75% used in the history cache |<p>Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/wcache.history.pused,10m)>75` |AVERAGE | |
+|Remote Zabbix server: More than 75% used in the history index cache |<p>Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/wcache.index.pused,10m)>75` |AVERAGE | |
+|Remote Zabbix server: More than 75% used in the trends cache |<p>Consider increasing TrendCacheSize in the zabbix_server.conf configuration file.</p> |`max(/Remote Zabbix server health/wcache.trend.pused,10m)>75` |AVERAGE | |
## Feedback
diff --git a/templates/app/zabbix_server_remote/template_app_remote_zabbix_server.yaml b/templates/app/zabbix_server_remote/template_app_remote_zabbix_server.yaml
index 36dba90f968..ef8d4dd798c 100644
--- a/templates/app/zabbix_server_remote/template_app_remote_zabbix_server.yaml
+++ b/templates/app/zabbix_server_remote/template_app_remote_zabbix_server.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T10:33:50Z'
+ date: '2021-12-19T15:20:01Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -10,7 +10,7 @@ zabbix_export:
uuid: 79b16cbbe593444eae3de66de0cb566b
template: 'Remote Zabbix server health'
name: 'Remote Zabbix server health'
- description: 'Template tooling version used: 0.38'
+ description: 'Template tooling version used: 0.40'
groups:
-
name: Templates/Applications
@@ -798,6 +798,37 @@ zabbix_export:
name: 'Remote Zabbix server: Utilization of self-monitoring processes over 75%'
priority: AVERAGE
-
+ uuid: 99da1d8bd47447d282542d40c1edea00
+ name: 'Remote Zabbix server: Utilization of service manager internal processes, in %'
+ type: DEPENDENT
+ key: process.service_manager.avg.busy
+ delay: '0'
+ history: 1w
+ value_type: FLOAT
+ units: '%'
+ description: 'Average percentage of time service manager processes have been busy in the last minute'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.process[''service manager''].busy.avg'
+ error_handler: CUSTOM_ERROR
+ error_handler_params: 'Processes service manager not started'
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Zabbix server'
+ triggers:
+ -
+ uuid: 001c9e2027ca4e7c8f5dc5b78681c573
+ expression: 'avg(/Remote Zabbix server health/process.service_manager.avg.busy,10m)>75'
+ recovery_mode: RECOVERY_EXPRESSION
+ recovery_expression: 'avg(/Remote Zabbix server health/process.service_manager.avg.busy,10m)<65'
+ name: 'Remote Zabbix server: Utilization of service manager processes over 75%'
+ priority: AVERAGE
+ -
uuid: 643036d74a10414f94dc357efbbd42b2
name: 'Remote Zabbix server: Utilization of snmp trapper data collector processes, in %'
type: DEPENDENT
@@ -922,6 +953,37 @@ zabbix_export:
name: 'Remote Zabbix server: Utilization of trapper processes over 75%'
priority: AVERAGE
-
+ uuid: 56dca0db33264d4b9c8fd769fbdff7b5
+ name: 'Remote Zabbix server: Utilization of trigger housekeeper internal processes, in %'
+ type: DEPENDENT
+ key: process.trigger_housekeeper.avg.busy
+ delay: '0'
+ history: 1w
+ value_type: FLOAT
+ units: '%'
+ description: 'Average percentage of time trigger housekeeper processes have been busy in the last minute'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.process[''trigger housekeeper''].busy.avg'
+ error_handler: CUSTOM_ERROR
+ error_handler_params: 'Processes trigger housekeeper not started'
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Zabbix server'
+ triggers:
+ -
+ uuid: 9c0daf6e85884122aa8c5b8432709c92
+ expression: 'avg(/Remote Zabbix server health/process.trigger_housekeeper.avg.busy,10m)>75'
+ recovery_mode: RECOVERY_EXPRESSION
+ recovery_expression: 'avg(/Remote Zabbix server health/process.trigger_housekeeper.avg.busy,10m)<65'
+ name: 'Remote Zabbix server: Utilization of trigger housekeeper processes over 75%'
+ priority: AVERAGE
+ -
uuid: 8d43954f6b9843009353c560a4f84157
name: 'Remote Zabbix server: Utilization of unreachable poller data collector processes, in %'
type: DEPENDENT
@@ -992,7 +1054,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: 'Availability statistics of Zabbix configuration cache. Percentage of used buffer'
+ description: 'Availability statistics of Zabbix configuration cache. Percentage of used buffer.'
preprocessing:
-
type: JSONPATH
@@ -1010,7 +1072,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/rcache.buffer.pused,10m)>75'
name: 'Remote Zabbix server: More than 75% used in the configuration cache'
priority: AVERAGE
- description: 'Consider increasing CacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing CacheSize in the zabbix_server.conf configuration file.'
-
uuid: 1625b3470fbf4bd7aa5b7051e328b37c
name: 'Remote Zabbix server: Trend function cache, % unique requests'
@@ -1020,7 +1082,10 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: 'Effectiveness statistics of the Zabbix trend function cache. Percentage of cached items from cached items + requests. Low percentage most likely means that the cache size can be reduced.'
+ description: |
+ Effectiveness statistics of the Zabbix trend function cache. Percentage
+ of cached items from cached items + requests. Low percentage most likely means
+ that the cache size can be reduced.
preprocessing:
-
type: JSONPATH
@@ -1043,7 +1108,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: "Effectiveness statistics of the Zabbix trend function cache.\tPercentage of cache misses"
+ description: 'Effectiveness statistics of the Zabbix trend function cache. Percentage of cache misses.'
preprocessing:
-
type: JSONPATH
@@ -1066,7 +1131,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: "Availability statistics of Zabbix value cache.\tPercentage of used buffer"
+ description: 'Availability statistics of Zabbix value cache. Percentage of used buffer.'
preprocessing:
-
type: JSONPATH
@@ -1084,7 +1149,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/vcache.buffer.pused,10m)>95'
name: 'Remote Zabbix server: More than 95% used in the value cache'
priority: AVERAGE
- description: 'Consider increasing ValueCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing ValueCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 9bd0079126974bf2a61f552de2cdf880
name: 'Remote Zabbix server: Value cache hits'
@@ -1094,7 +1159,9 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: vps
- description: 'Effectiveness statistics of Zabbix value cache. Number of cache hits (history values taken from the cache)'
+ description: |
+ Effectiveness statistics of Zabbix value cache. Number of cache hits
+ (history values taken from the cache).
preprocessing:
-
type: JSONPATH
@@ -1119,7 +1186,9 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: vps
- description: 'Effectiveness statistics of Zabbix value cache. Number of cache misses (history values taken from the database)'
+ description: |
+ Effectiveness statistics of Zabbix value cache. Number of cache misses
+ (history values taken from the database).
preprocessing:
-
type: JSONPATH
@@ -1142,7 +1211,7 @@ zabbix_export:
key: vcache.cache.mode
delay: '0'
history: 1w
- description: 'Value cache operating mode'
+ description: 'Value cache operating mode.'
valuemap:
name: 'Value cache operating mode'
preprocessing:
@@ -1205,7 +1274,7 @@ zabbix_export:
history: 1w
value_type: FLOAT
units: '%'
- description: 'Availability statistics of Zabbix vmware cache. Percentage of used buffer'
+ description: 'Availability statistics of Zabbix vmware cache. Percentage of used buffer.'
preprocessing:
-
type: JSONPATH
@@ -1225,7 +1294,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/vmware.buffer.pused,10m)>75'
name: 'Remote Zabbix server: More than 75% used in the vmware cache'
priority: AVERAGE
- description: 'Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing VMwareCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 98f47a07e2ba4294a9eb2aa1df2ba9f3
name: 'Remote Zabbix server: History write cache, % used'
@@ -1255,7 +1324,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/wcache.history.pused,10m)>75'
name: 'Remote Zabbix server: More than 75% used in the history cache'
priority: AVERAGE
- description: 'Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing HistoryCacheSize in the zabbix_server.conf configuration file.'
-
uuid: b1add70210a44b668f06cc3e062173ad
name: 'Remote Zabbix server: History index cache, % used'
@@ -1285,7 +1354,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/wcache.index.pused,10m)>75'
name: 'Remote Zabbix server: More than 75% used in the history index cache'
priority: AVERAGE
- description: 'Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing HistoryIndexCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 1f79208bb2ec4e4eb26e6e1b95c3635e
name: 'Remote Zabbix server: Trend write cache, % used'
@@ -1315,7 +1384,7 @@ zabbix_export:
expression: 'max(/Remote Zabbix server health/wcache.trend.pused,10m)>75'
name: 'Remote Zabbix server: More than 75% used in the trends cache'
priority: AVERAGE
- description: 'Consider increasing TrendCacheSize in the zabbix_server.conf configuration file'
+ description: 'Consider increasing TrendCacheSize in the zabbix_server.conf configuration file.'
-
uuid: 34fe014843974248a98a45596ce43e1a
name: 'Remote Zabbix server: Number of processed values per second'
@@ -1504,7 +1573,7 @@ zabbix_export:
type: INTERNAL
key: 'zabbix[stats,{$ADDRESS},{$PORT},queue,10m]'
history: 1w
- description: 'Number of monitored items in the queue which are delayed at least by 10 minutes'
+ description: 'Number of monitored items in the queue which are delayed at least by 10 minutes.'
preprocessing:
-
type: JSONPATH
@@ -1520,14 +1589,16 @@ zabbix_export:
expression: 'min(/Remote Zabbix server health/zabbix[stats,{$ADDRESS},{$PORT},queue,10m],10m)>100'
name: 'Remote Zabbix server: More than 100 items having missing data for more than 10 minutes'
priority: WARNING
- description: 'zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about how many items are missing data for more than 10 minutes'
+ description: |
+ zabbix[stats,{$IP},{$PORT},queue,10m] item is collecting data about
+ how many items are missing data for more than 10 minutes.
-
uuid: c140ebe6c0404ee6b17b5ada2de09f28
name: 'Remote Zabbix server: Zabbix stats queue'
type: INTERNAL
key: 'zabbix[stats,{$ADDRESS},{$PORT},queue]'
history: 1w
- description: 'Number of monitored items in the queue which are delayed at least by 6 seconds'
+ description: 'Number of monitored items in the queue which are delayed at least by 6 seconds.'
preprocessing:
-
type: JSONPATH
@@ -1550,6 +1621,128 @@ zabbix_export:
-
tag: Application
value: 'Zabbix raw items'
+ discovery_rules:
+ -
+ uuid: 0557f879881a4d558410444737ae3738
+ name: 'High availability cluster node discovery'
+ type: DEPENDENT
+ key: zabbix.nodes.discovery
+ delay: '0'
+ description: 'LLD rule with item and trigger prototypes for node discovery.'
+ item_prototypes:
+ -
+ uuid: caca3ccdcec141a3a9c3233ba72b8d0d
+ name: 'Cluster node [{#NODE.NAME}]: Address'
+ type: DEPENDENT
+ key: 'zabbix.nodes.address[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ trends: '0'
+ value_type: CHAR
+ description: 'Node IPv4 address.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.ha[?(@.id=="{#NODE.ID}")].address.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: a9ded61c4dba4abb9213aec9af00a604
+ name: 'Cluster node [{#NODE.NAME}]: Last access age'
+ type: DEPENDENT
+ key: 'zabbix.nodes.lastaccess.age[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ units: uptime
+ description: 'Time between database unix_timestamp() and last access time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.ha[?(@.id=="{#NODE.ID}")].lastaccess_age.first()'
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: 0968e4fe51eb4c2b98a508090014aba0
+ name: 'Cluster node [{#NODE.NAME}]: Last access time'
+ type: DEPENDENT
+ key: 'zabbix.nodes.lastaccess.time[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ units: unixtime
+ description: 'Last access time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.ha[?(@.id=="{#NODE.ID}")].lastaccess.first()'
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ -
+ uuid: 2b629362886546cab34396352b93835a
+ name: 'Cluster node [{#NODE.NAME}]: Status'
+ type: DEPENDENT
+ key: 'zabbix.nodes.status[{#NODE.ID}]'
+ delay: '0'
+ history: 1w
+ description: 'Node status.'
+ valuemap:
+ name: 'Cluster node status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.data.ha[?(@.id=="{#NODE.ID}")].status.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ tags:
+ -
+ tag: Application
+ value: 'Cluster node [{#NODE.NAME}]'
+ trigger_prototypes:
+ -
+ uuid: 5d68b55175924cb4adde600a019496c4
+ expression: 'last(/Remote Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#1)<>last(/Remote Zabbix server health/zabbix.nodes.status[{#NODE.ID}],#2)'
+ name: 'Cluster node [{#NODE.NAME}]: Status changed'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: INFO
+ description: 'The state of the node has changed. Confirm to close.'
+ manual_close: 'YES'
+ master_item:
+ key: 'zabbix[stats,{$ADDRESS},{$PORT}]'
+ lld_macro_paths:
+ -
+ lld_macro: '{#NODE.ID}'
+ path: $.id
+ -
+ lld_macro: '{#NODE.NAME}'
+ path: $.name
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.data.ha
macros:
-
macro: '{$ADDRESS}'
@@ -1731,6 +1924,22 @@ zabbix_export:
host: 'Remote Zabbix server health'
valuemaps:
-
+ uuid: 25ab0f5c570b4a7e9d15bd41db79fe25
+ name: 'Cluster node status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Standby
+ -
+ value: '1'
+ newvalue: Stopped
+ -
+ value: '2'
+ newvalue: Unavailable
+ -
+ value: '3'
+ newvalue: Active
+ -
uuid: 5dff563dde3c45d8b6d92525111384c6
name: 'Value cache operating mode'
mappings:
@@ -2201,6 +2410,18 @@ zabbix_export:
item:
host: 'Remote Zabbix server health'
key: process.report_writer.avg.busy
+ -
+ sortorder: '18'
+ color: 790E1F
+ item:
+ host: 'Remote Zabbix server health'
+ key: process.service_manager.avg.busy
+ -
+ sortorder: '19'
+ color: 87AC4D
+ item:
+ host: 'Remote Zabbix server health'
+ key: process.trigger_housekeeper.avg.busy
-
uuid: 63004d5061324a90b247209977de9cbd
name: 'Remote Zabbix server: Zabbix internal queues'
diff --git a/templates/app/zookeeper_http/README.md b/templates/app/zookeeper_http/README.md
index 975cfeb832e..fc00d85262e 100644
--- a/templates/app/zookeeper_http/README.md
+++ b/templates/app/zookeeper_http/README.md
@@ -17,9 +17,9 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/5.4/manual/config/templates_out_of_the_box/http) for basic instructions.
-This template works with standalone and cluster instances. Metrics are collected from each Zookeper node by requests to [AdminServer](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver).
-By default AdminServer is enabled and listens on port 8080.
-You can enable or configure AdminServer parameters according [official documentations](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver_config).
+This template works with standalone and cluster instances. Metrics are collected from each Zookeeper node by requests to [AdminServer](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver).
+By default AdminServer is enabled and listens on port 8080.
+You can enable or configure AdminServer parameters according to the [official documentation](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver_config).
Don't forget to change macros {$ZOOKEEPER.COMMAND_URL}, {$ZOOKEEPER.PORT}, {$ZOOKEEPER.SCHEME}.
@@ -29,14 +29,14 @@ No specific Zabbix configuration is required.
### Macros used
-| Name | Description | Default |
-|----------------------------------------|-----------------------------------------------------------------------------------------------|------------|
-| {$ZOOKEEPER.COMMAND_URL} | <p>The URL for listing and issuing commands relative to the root URL (admin.commandURL).</p> | `commands` |
-| {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN} | <p>Maximum percentage of file descriptors usage alert threshold (for trigger expression).</p> | `85` |
-| {$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN} | <p>Maximum number of outstanding requests (for trigger expression).</p> | `10` |
-| {$ZOOKEEPER.PENDING_SYNCS.MAX.WARN} | <p>Maximum number of pending syncs from the followers (for trigger expression).</p> | `10` |
-| {$ZOOKEEPER.PORT} | <p>The port the embedded Jetty server listens on (admin.serverPort).</p> | `8080` |
-| {$ZOOKEEPER.SCHEME} | <p>Request scheme which may be http or https</p> | `http` |
+|Name|Description|Default|
+|----|-----------|-------|
+|{$ZOOKEEPER.COMMAND_URL} |<p>The URL for listing and issuing commands relative to the root URL (admin.commandURL).</p> |`commands` |
+|{$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN} |<p>Maximum percentage of file descriptors usage alert threshold (for trigger expression).</p> |`85` |
+|{$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN} |<p>Maximum number of outstanding requests (for trigger expression).</p> |`10` |
+|{$ZOOKEEPER.PENDING_SYNCS.MAX.WARN} |<p>Maximum number of pending syncs from the followers (for trigger expression).</p> |`10` |
+|{$ZOOKEEPER.PORT} |<p>The port the embedded Jetty server listens on (admin.serverPort).</p> |`8080` |
+|{$ZOOKEEPER.SCHEME} |<p>Request scheme which may be http or https.</p> |`http` |
## Template links
@@ -44,75 +44,75 @@ There are no template links in this template.
## Discovery rules
-| Name | Description | Type | Key and additional info |
-|--------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Leader metrics discovery | <p>Additional metrics for leader node</p> | DEPENDENT | zookeeper.metrics.leader<p>**Preprocessing**:</p><p>- JSONPATH: `$.server_state`</p><p>- JAVASCRIPT: `return JSON.stringify(value == 'leader' ? [{'{#SINGLETON}': ''}] : []);`</p> |
-| Clients discovery | <p>Get list of client connections.</p><p>Note, depending on the number of client connections this operation may be expensive (i.e. impact server performance).</p> | HTTP_AGENT | zookeeper.clients<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Leader metrics discovery |<p>Additional metrics for leader node</p> |DEPENDENT |zookeeper.metrics.leader<p>**Preprocessing**:</p><p>- JSONPATH: `$.server_state`</p><p>- JAVASCRIPT: `return JSON.stringify(value == 'leader' ? [{'{#SINGLETON}': ''}] : []);`</p> |
+|Clients discovery |<p>Get list of client connections.</p><p>Note, depending on the number of client connections this operation may be expensive (i.e. impact server performance).</p> |HTTP_AGENT |zookeeper.clients<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
## Items collected
-| Group | Name | Description | Type | Key and additional info |
-|------------------|----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Zabbix_raw_items | Zookeeper: Get server metrics | <p>-</p> | HTTP_AGENT | zookeeper.get_metrics |
-| Zabbix_raw_items | Zookeeper: Get connections stats | <p>Get information on client connections to server. Note, depending on the number of client connections this operation may be expensive (i.e. impact server performance).</p> | HTTP_AGENT | zookeeper.get_connections_stats |
-| Zookeeper | Zookeeper: Server mode | <p>Mode of the server. In an ensemble, this may either be leader or follower. Otherwise, it is standalone</p> | DEPENDENT | zookeeper.server_state<p>**Preprocessing**:</p><p>- JSONPATH: `$.server_state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zookeeper | Zookeeper: Uptime | <p>Uptime of Zookeeper server.</p> | DEPENDENT | zookeeper.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
-| Zookeeper | Zookeeper: Version | <p>Version of Zookeeper server.</p> | DEPENDENT | zookeeper.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.version`</p><p>- REGEX: `([^,]+)--(.+) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
-| Zookeeper | Zookeeper: Approximate data size | <p>Data tree size in bytes.The size includes the znode path and its value.</p> | DEPENDENT | zookeeper.approximate_data_size<p>**Preprocessing**:</p><p>- JSONPATH: `$.approximate_data_size`</p> |
-| Zookeeper | Zookeeper: File descriptors, max | <p>Maximum number of file descriptors that a zookeeper server can open.</p> | DEPENDENT | zookeeper.max_file_descriptor_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_file_descriptor_count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zookeeper | Zookeeper: File descriptors, open | <p>Number of file descriptors that a zookeeper server has open.</p> | DEPENDENT | zookeeper.open_file_descriptor_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.open_file_descriptor_count`</p> |
-| Zookeeper | Zookeeper: Outstanding requests | <p>The number of queued requests when the server is under load and is receiving more sustained requests than it can process.</p> | DEPENDENT | zookeeper.outstanding_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.outstanding_requests`</p> |
-| Zookeeper | Zookeeper: Commit per sec | <p>The number of commits performed per second</p> | DEPENDENT | zookeeper.commit_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.commit_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Diff syncs per sec | <p>Number of diff syncs performed per second</p> | DEPENDENT | zookeeper.diff_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.diff_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Snap syncs per sec | <p>Number of snap syncs performed per second</p> | DEPENDENT | zookeeper.snap_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.snap_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Looking per sec | <p>Rate of transitions into looking state.</p> | DEPENDENT | zookeeper.looking_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.looking_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Alive connections | <p>Number of active clients connected to a zookeeper server.</p> | DEPENDENT | zookeeper.num_alive_connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.num_alive_connections`</p> |
-| Zookeeper | Zookeeper: Global sessions | <p>Number of global sessions.</p> | DEPENDENT | zookeeper.global_sessions<p>**Preprocessing**:</p><p>- JSONPATH: `$.global_sessions`</p> |
-| Zookeeper | Zookeeper: Local sessions | <p>Number of local sessions.</p> | DEPENDENT | zookeeper.local_sessions<p>**Preprocessing**:</p><p>- JSONPATH: `$.local_sessions`</p> |
-| Zookeeper | Zookeeper: Drop connections per sec | <p>Rate of connection drops.</p> | DEPENDENT | zookeeper.connection_drop_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_drop_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Rejected connections per sec | <p>Rate of connection rejected.</p> | DEPENDENT | zookeeper.connection_rejected.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_rejected`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Revalidate connections per sec | <p>Rate ofconnection revalidations.</p> | DEPENDENT | zookeeper.connection_revalidate_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_revalidate_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Revalidate per sec | <p>Rate of revalidations.</p> | DEPENDENT | zookeeper.revalidate_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.revalidate_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Latency, max | <p>The maximum amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.max_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_latency`</p> |
-| Zookeeper | Zookeeper: Latency, min | <p>The minimum amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.min_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.min_latency`</p> |
-| Zookeeper | Zookeeper: Latency, avg | <p>The average amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.avg_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.avg_latency`</p> |
-| Zookeeper | Zookeeper: Znode count | <p>The number of znodes in the ZooKeeper namespace (the data)</p> | DEPENDENT | zookeeper.znode_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.znode_count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
-| Zookeeper | Zookeeper: Ephemeral nodes count | <p>Number of ephemeral nodes that a zookeeper server has in its data tree.</p> | DEPENDENT | zookeeper.ephemerals_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.ephemerals_count`</p> |
-| Zookeeper | Zookeeper: Watch count | <p>Number of watches currently set on the local ZooKeeper process.</p> | DEPENDENT | zookeeper.watch_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.watch_count`</p> |
-| Zookeeper | Zookeeper: Packets sent per sec | <p>The number of zookeeper packets sent from a server per second.</p> | DEPENDENT | zookeeper.packets_sent<p>**Preprocessing**:</p><p>- JSONPATH: `$.packets_sent`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Packets received per sec | <p>The number of zookeeper packets received by a server per second.</p> | DEPENDENT | zookeeper.packets_received.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.packets_received`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Bytes received per sec | <p>Number of bytes received per second.</p> | DEPENDENT | zookeeper.bytes_received_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_received_count`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper: Election time, avg | <p>Time between entering and leaving election.</p> | DEPENDENT | zookeeper.avg_election_time<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Zookeeper | Zookeeper: Elections | <p>Number of elections happened.</p> | DEPENDENT | zookeeper.cnt_election_time<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Zookeeper | Zookeeper: Fsync time, avg | <p>Time to fsync transaction log.</p> | DEPENDENT | zookeeper.avg_fsynctime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Zookeeper | Zookeeper: Fsync | <p>Count of performed fsyncs.</p> | DEPENDENT | zookeeper.cnt_fsynctime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `var metrics = JSON.parse(value) return metrics.cnt_fsynctime || metrics.fsynctime_count`</p> |
-| Zookeeper | Zookeeper: Snapshot write time, avg | <p>Average time to write a snapshot.</p> | DEPENDENT | zookeeper.avg_snapshottime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `Text is too long. Please see the template.`</p> |
-| Zookeeper | Zookeeper: Snapshot writes | <p>Count of performed snapshot writes.</p> | DEPENDENT | zookeeper.cnt_snapshottime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `var metrics = JSON.parse(value) return metrics.snapshottime_count || metrics.cnt_snapshottime`</p> |
-| Zookeeper | Zookeeper: Pending syncs{#SINGLETON} | <p>Number of pending syncs to carry out to ZooKeeper ensemble followers.</p> | DEPENDENT | zookeeper.pending_syncs[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pending_syncs`</p> |
-| Zookeeper | Zookeeper: Quorum size{#SINGLETON} | | DEPENDENT | zookeeper.quorum_size[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.quorum_size`</p> |
-| Zookeeper | Zookeeper: Synced followers{#SINGLETON} | <p>Number of synced followers reported when a node server_state is leader.</p> | DEPENDENT | zookeeper.synced_followers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_followers`</p> |
-| Zookeeper | Zookeeper: Synced non-voting follower{#SINGLETON} | <p>Number of synced voting followers reported when a node server_state is leader.</p> | DEPENDENT | zookeeper.synced_non_voting_followers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_non_voting_followers`</p> |
-| Zookeeper | Zookeeper: Synced observers{#SINGLETON} | <p>Number of synced observers.</p> | DEPENDENT | zookeeper.synced_observers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_observers`</p> |
-| Zookeeper | Zookeeper: Learners{#SINGLETON} | <p>Number of learners.</p> | DEPENDENT | zookeeper.learners[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.learners`</p> |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Latency, max | <p>The maximum amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.max_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].max_latency.first()`</p> |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Latency, min | <p>The minimum amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.min_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].min_latency.first()`</p> |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Latency, avg | <p>The average amount of time it takes for the server to respond to a client request.</p> | DEPENDENT | zookeeper.avg_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].avg_latency.first()`</p> |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Packets sent per sec | <p>The number of packets sent.</p> | DEPENDENT | zookeeper.packets_sent[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].packets_sent.first()`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Packets received per sec | <p>The number of packets received.</p> | DEPENDENT | zookeeper.packets_received[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].packets_received.first()`</p><p>- CHANGE_PER_SECOND |
-| Zookeeper | Zookeeper client {#TYPE} [{#CLIENT}]: Outstanding requests | <p>The number of queued requests when the server is under load and is receiving more sustained requests than it can process.</p> | DEPENDENT | zookeeper.outstanding_requests[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].outstanding_requests.first()`</p> |
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|Zabbix_raw_items |Zookeeper: Get server metrics |<p>-</p> |HTTP_AGENT |zookeeper.get_metrics |
+|Zabbix_raw_items |Zookeeper: Get connections stats |<p>Get information on client connections to server. Note, depending on the number of client connections this operation may be expensive (i.e. impact server performance).</p> |HTTP_AGENT |zookeeper.get_connections_stats |
+|Zookeeper |Zookeeper: Server mode |<p>Mode of the server. In an ensemble, this may either be leader or follower. Otherwise, it is standalone.</p> |DEPENDENT |zookeeper.server_state<p>**Preprocessing**:</p><p>- JSONPATH: `$.server_state`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zookeeper |Zookeeper: Uptime |<p>Uptime of Zookeeper server.</p> |DEPENDENT |zookeeper.uptime<p>**Preprocessing**:</p><p>- JSONPATH: `$.uptime`</p><p>- MULTIPLIER: `0.001`</p> |
+|Zookeeper |Zookeeper: Version |<p>Version of Zookeeper server.</p> |DEPENDENT |zookeeper.version<p>**Preprocessing**:</p><p>- JSONPATH: `$.version`</p><p>- REGEX: `([^,]+)--(.+) \1`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `3h`</p> |
+|Zookeeper |Zookeeper: Approximate data size |<p>Data tree size in bytes. The size includes the znode path and its value.</p> |DEPENDENT |zookeeper.approximate_data_size<p>**Preprocessing**:</p><p>- JSONPATH: `$.approximate_data_size`</p> |
+|Zookeeper |Zookeeper: File descriptors, max |<p>Maximum number of file descriptors that a zookeeper server can open.</p> |DEPENDENT |zookeeper.max_file_descriptor_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_file_descriptor_count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zookeeper |Zookeeper: File descriptors, open |<p>Number of file descriptors that a zookeeper server has open.</p> |DEPENDENT |zookeeper.open_file_descriptor_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.open_file_descriptor_count`</p> |
+|Zookeeper |Zookeeper: Outstanding requests |<p>The number of queued requests when the server is under load and is receiving more sustained requests than it can process.</p> |DEPENDENT |zookeeper.outstanding_requests<p>**Preprocessing**:</p><p>- JSONPATH: `$.outstanding_requests`</p> |
+|Zookeeper |Zookeeper: Commit per sec |<p>The number of commits performed per second.</p> |DEPENDENT |zookeeper.commit_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.commit_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Diff syncs per sec |<p>Number of diff syncs performed per second.</p> |DEPENDENT |zookeeper.diff_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.diff_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Snap syncs per sec |<p>Number of snap syncs performed per second.</p> |DEPENDENT |zookeeper.snap_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.snap_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Looking per sec |<p>Rate of transitions into looking state.</p> |DEPENDENT |zookeeper.looking_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.looking_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Alive connections |<p>Number of active clients connected to a zookeeper server.</p> |DEPENDENT |zookeeper.num_alive_connections<p>**Preprocessing**:</p><p>- JSONPATH: `$.num_alive_connections`</p> |
+|Zookeeper |Zookeeper: Global sessions |<p>Number of global sessions.</p> |DEPENDENT |zookeeper.global_sessions<p>**Preprocessing**:</p><p>- JSONPATH: `$.global_sessions`</p> |
+|Zookeeper |Zookeeper: Local sessions |<p>Number of local sessions.</p> |DEPENDENT |zookeeper.local_sessions<p>**Preprocessing**:</p><p>- JSONPATH: `$.local_sessions`</p> |
+|Zookeeper |Zookeeper: Drop connections per sec |<p>Rate of connection drops.</p> |DEPENDENT |zookeeper.connection_drop_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_drop_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Rejected connections per sec |<p>Rate of rejected connections.</p> |DEPENDENT |zookeeper.connection_rejected.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_rejected`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Revalidate connections per sec |<p>Rate of connection revalidations.</p> |DEPENDENT |zookeeper.connection_revalidate_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.connection_revalidate_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Revalidate per sec |<p>Rate of revalidations.</p> |DEPENDENT |zookeeper.revalidate_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.revalidate_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Latency, max |<p>The maximum amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.max_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.max_latency`</p> |
+|Zookeeper |Zookeeper: Latency, min |<p>The minimum amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.min_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.min_latency`</p> |
+|Zookeeper |Zookeeper: Latency, avg |<p>The average amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.avg_latency<p>**Preprocessing**:</p><p>- JSONPATH: `$.avg_latency`</p> |
+|Zookeeper |Zookeeper: Znode count |<p>The number of znodes in the ZooKeeper namespace (the data)</p> |DEPENDENT |zookeeper.znode_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.znode_count`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|Zookeeper |Zookeeper: Ephemeral nodes count |<p>Number of ephemeral nodes that a zookeeper server has in its data tree.</p> |DEPENDENT |zookeeper.ephemerals_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.ephemerals_count`</p> |
+|Zookeeper |Zookeeper: Watch count |<p>Number of watches currently set on the local ZooKeeper process.</p> |DEPENDENT |zookeeper.watch_count<p>**Preprocessing**:</p><p>- JSONPATH: `$.watch_count`</p> |
+|Zookeeper |Zookeeper: Packets sent per sec |<p>The number of zookeeper packets sent from a server per second.</p> |DEPENDENT |zookeeper.packets_sent<p>**Preprocessing**:</p><p>- JSONPATH: `$.packets_sent`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Packets received per sec |<p>The number of zookeeper packets received by a server per second.</p> |DEPENDENT |zookeeper.packets_received.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.packets_received`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Bytes received per sec |<p>Number of bytes received per second.</p> |DEPENDENT |zookeeper.bytes_received_count.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.bytes_received_count`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper: Election time, avg |<p>Time between entering and leaving election.</p> |DEPENDENT |zookeeper.avg_election_time<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Zookeeper |Zookeeper: Elections |<p>Number of elections happened.</p> |DEPENDENT |zookeeper.cnt_election_time<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Zookeeper |Zookeeper: Fsync time, avg |<p>Time to fsync transaction log.</p> |DEPENDENT |zookeeper.avg_fsynctime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Zookeeper |Zookeeper: Fsync |<p>Count of performed fsyncs.</p> |DEPENDENT |zookeeper.cnt_fsynctime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `var metrics = JSON.parse(value) return metrics.cnt_fsynctime || metrics.fsynctime_count`</p> |
+|Zookeeper |Zookeeper: Snapshot write time, avg |<p>Average time to write a snapshot.</p> |DEPENDENT |zookeeper.avg_snapshottime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|Zookeeper |Zookeeper: Snapshot writes |<p>Count of performed snapshot writes.</p> |DEPENDENT |zookeeper.cnt_snapshottime<p>**Preprocessing**:</p><p>- JAVASCRIPT: `var metrics = JSON.parse(value) return metrics.snapshottime_count || metrics.cnt_snapshottime`</p> |
+|Zookeeper |Zookeeper: Pending syncs{#SINGLETON} |<p>Number of pending syncs to carry out to ZooKeeper ensemble followers.</p> |DEPENDENT |zookeeper.pending_syncs[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.pending_syncs`</p> |
+|Zookeeper |Zookeeper: Quorum size{#SINGLETON} |<p>-</p> |DEPENDENT |zookeeper.quorum_size[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.quorum_size`</p> |
+|Zookeeper |Zookeeper: Synced followers{#SINGLETON} |<p>Number of synced followers reported when a node server_state is leader.</p> |DEPENDENT |zookeeper.synced_followers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_followers`</p> |
+|Zookeeper |Zookeeper: Synced non-voting follower{#SINGLETON} |<p>Number of synced non-voting followers reported when a node server_state is leader.</p> |DEPENDENT |zookeeper.synced_non_voting_followers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_non_voting_followers`</p> |
+|Zookeeper |Zookeeper: Synced observers{#SINGLETON} |<p>Number of synced observers.</p> |DEPENDENT |zookeeper.synced_observers[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.synced_observers`</p> |
+|Zookeeper |Zookeeper: Learners{#SINGLETON} |<p>Number of learners.</p> |DEPENDENT |zookeeper.learners[{#SINGLETON}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.learners`</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Latency, max |<p>The maximum amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.max_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].max_latency.first()`</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Latency, min |<p>The minimum amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.min_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].min_latency.first()`</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Latency, avg |<p>The average amount of time it takes for the server to respond to a client request.</p> |DEPENDENT |zookeeper.avg_latency[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].avg_latency.first()`</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Packets sent per sec |<p>The number of packets sent.</p> |DEPENDENT |zookeeper.packets_sent[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].packets_sent.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Packets received per sec |<p>The number of packets received.</p> |DEPENDENT |zookeeper.packets_received[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].packets_received.first()`</p><p>- CHANGE_PER_SECOND</p> |
+|Zookeeper |Zookeeper client {#TYPE} [{#CLIENT}]: Outstanding requests |<p>The number of queued requests when the server is under load and is receiving more sustained requests than it can process.</p> |DEPENDENT |zookeeper.outstanding_requests[{#TYPE},{#CLIENT}]<p>**Preprocessing**:</p><p>- JSONPATH: `$.{#TYPE}.[?(@.remote_socket_address == "{#ADDRESS}")].outstanding_requests.first()`</p> |
## Triggers
-| Name | Description | Expression | Severity | Dependencies and additional info |
-|----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|----------------------------------|
-| Zookeeper: Server mode has changed (new mode: {ITEM.VALUE}) | <p>Zookeeper node state has changed. Ack to close.</p> | `{TEMPLATE_NAME:zookeeper.server_state.diff()}=1 and {TEMPLATE_NAME:zookeeper.server_state.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Zookeeper: has been restarted (uptime < 10m) | <p>Uptime is less than 10 minutes</p> | `{TEMPLATE_NAME:zookeeper.uptime.last()}<10m` | INFO | <p>Manual close: YES</p> |
-| Zookeeper: Failed to fetch info data (or no data for 10m) | <p>Zabbix has not received data for items for the last 10 minutes</p> | `{TEMPLATE_NAME:zookeeper.uptime.nodata(10m)}=1` | WARNING | <p>Manual close: YES</p> |
-| Zookeeper: Version has changed (new version: {ITEM.VALUE}) | <p>Zookeeper version has changed. Ack to close.</p> | `{TEMPLATE_NAME:zookeeper.version.diff()}=1 and {TEMPLATE_NAME:zookeeper.version.strlen()}>0` | INFO | <p>Manual close: YES</p> |
-| Zookeeper: Too many file descriptors used (over {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}% for 5 min) | <p>Number of file descriptors used more than {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}% of the available number of file descriptors.</p> | `{TEMPLATE_NAME:zookeeper.open_file_descriptor_count.min(5m)} * 100 / {Zookeeper by HTTP:zookeeper.max_file_descriptor_count.last()} > {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}` | WARNING | |
-| Zookeeper: Too many queued requests (over {$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN}% for 5 min) | <p>Number of queued requests in the server. This goes up when the server receives more requests than it can process.</p> | `{TEMPLATE_NAME:zookeeper.outstanding_requests.min(5m)}>{$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN}` | AVERAGE | <p>Manual close: YES</p> |
-| Zookeeper: Too many pending syncs (over {$ZOOKEEPER.PENDING_SYNCS.MAX.WARN}% for 5 min) | <p>-</p> | `{TEMPLATE_NAME:zookeeper.pending_syncs[{#SINGLETON}].min(5m)}>{$ZOOKEEPER.PENDING_SYNCS.MAX.WARN}` | AVERAGE | <p>Manual close: YES</p> |
-| Zookeeper: Too few active followers | <p>The number of followers should equal the total size of your ZooKeeper ensemble, minus 1 (the leader is not included in the follower count). If the ensemble fails to maintain quorum, all automatic failover features are suspended. </p> | `{TEMPLATE_NAME:zookeeper.synced_followers[{#SINGLETON}].last()} < {Zookeeper by HTTP:zookeeper.quorum_size[{#SINGLETON}].last()}-1` | AVERAGE | |
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|Zookeeper: Server mode has changed (new mode: {ITEM.VALUE}) |<p>Zookeeper node state has changed. Ack to close.</p> |`last(/Zookeeper by HTTP/zookeeper.server_state,#1)<>last(/Zookeeper by HTTP/zookeeper.server_state,#2) and length(last(/Zookeeper by HTTP/zookeeper.server_state))>0` |INFO |<p>Manual close: YES</p> |
+|Zookeeper: has been restarted (uptime < 10m) |<p>Uptime is less than 10 minutes</p> |`last(/Zookeeper by HTTP/zookeeper.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|Zookeeper: Failed to fetch info data (or no data for 10m) |<p>Zabbix has not received data for items for the last 10 minutes</p> |`nodata(/Zookeeper by HTTP/zookeeper.uptime,10m)=1` |WARNING |<p>Manual close: YES</p> |
+|Zookeeper: Version has changed (new version: {ITEM.VALUE}) |<p>Zookeeper version has changed. Ack to close.</p> |`last(/Zookeeper by HTTP/zookeeper.version,#1)<>last(/Zookeeper by HTTP/zookeeper.version,#2) and length(last(/Zookeeper by HTTP/zookeeper.version))>0` |INFO |<p>Manual close: YES</p> |
+|Zookeeper: Too many file descriptors used (over {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}% for 5 min) |<p>Number of file descriptors used more than {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}% of the available number of file descriptors.</p> |`min(/Zookeeper by HTTP/zookeeper.open_file_descriptor_count,5m) * 100 / last(/Zookeeper by HTTP/zookeeper.max_file_descriptor_count) > {$ZOOKEEPER.FILE_DESCRIPTORS.MAX.WARN}` |WARNING | |
+|Zookeeper: Too many queued requests (over {$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN}% for 5 min) |<p>Number of queued requests in the server. This goes up when the server receives more requests than it can process.</p> |`min(/Zookeeper by HTTP/zookeeper.outstanding_requests,5m)>{$ZOOKEEPER.OUTSTANDING_REQ.MAX.WARN}` |AVERAGE |<p>Manual close: YES</p> |
+|Zookeeper: Too many pending syncs (over {$ZOOKEEPER.PENDING_SYNCS.MAX.WARN}% for 5 min) |<p>-</p> |`min(/Zookeeper by HTTP/zookeeper.pending_syncs[{#SINGLETON}],5m)>{$ZOOKEEPER.PENDING_SYNCS.MAX.WARN}` |AVERAGE |<p>Manual close: YES</p> |
+|Zookeeper: Too few active followers |<p>The number of followers should equal the total size of your ZooKeeper ensemble, minus 1 (the leader is not included in the follower count). If the ensemble fails to maintain quorum, all automatic failover features are suspended. </p> |`last(/Zookeeper by HTTP/zookeeper.synced_followers[{#SINGLETON}]) < last(/Zookeeper by HTTP/zookeeper.quorum_size[{#SINGLETON}])-1` |AVERAGE | |
## Feedback
diff --git a/templates/app/zookeeper_http/template_app_zookeeper_http.yaml b/templates/app/zookeeper_http/template_app_zookeeper_http.yaml
index 2b96a467bed..becf15d481f 100644
--- a/templates/app/zookeeper_http/template_app_zookeeper_http.yaml
+++ b/templates/app/zookeeper_http/template_app_zookeeper_http.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2021-05-11T08:05:18Z'
+ date: '2021-12-19T15:20:02Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -12,12 +12,12 @@ zabbix_export:
name: 'Zookeeper by HTTP'
description: |
Get Apache Zookeeper metrics by HTTP agent.
- This template works with standalone and cluster instances. Metrics are collected from each Zookeper node by requests to AdminServer.
+      This template works with standalone and cluster instances. Metrics are collected from each Zookeeper node by requests to AdminServer.
Don't forget to change macros {$ZOOKEEPER.COMMAND_URL}, {$ZOOKEEPER.PORT}, {$ZOOKEEPER.SCHEME}.
You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
- Template tooling version used: 0.38
+ Template tooling version used: 0.40
groups:
-
name: Templates/Applications
@@ -183,8 +183,8 @@ zabbix_export:
parameters:
- |
var metrics = JSON.parse(value);
- if (metrics.server_state === "standalone") {
- return 0
+ if (metrics.server_state === "standalone") {
+ return 0
}
else {
return metrics.cnt_election_time || metrics.election_time_count