
github.com/zabbix/zabbix.git
author     Dmitrijs Goloscapovs <dmitrijs.goloscapovs@zabbix.com>  2022-06-20 16:35:33 +0300
committer  Dmitrijs Goloscapovs <dmitrijs.goloscapovs@zabbix.com>  2022-06-20 16:35:33 +0300
commit     c6040e7b8b93256633ec4aa7dc98263499b96e7c (patch)
tree       3b0e635b878e01b0bc17091b2e7abd93b47ec10c
parent     4c9885e6bb4fce783367fa7fc90ed33d6ff94fae (diff)
parent     c488a211647f6b53b9c7423678f6c39601cb1db2 (diff)
.......... [ZBX-20638] updated to latest master
-rw-r--r--  .github/workflows/sonarcloud-others.yml | 26
-rw-r--r--  .github/workflows/sonarcloud.yml | 65
-rw-r--r--  ChangeLog | 70
-rw-r--r--  ChangeLog.d/bugfix/ZBX-20105 | 1
-rw-r--r--  ChangeLog.d/bugfix/ZBX-20965 | 1
-rw-r--r--  ChangeLog.d/bugfix/ZBX-21066 | 1
-rw-r--r--  build.xml | 24
-rw-r--r--  configure.ac | 83
-rw-r--r--  create/src/schema.tmpl | 2
-rw-r--r--  include/common.h | 7
-rw-r--r--  include/version.h | 4
-rw-r--r--  m4/libmodbus.m4 | 14
-rw-r--r--  m4/libopenssl.m4 | 21
-rw-r--r--  m4/libxml2.m4 | 2
-rw-r--r--  m4/pcre.m4 | 333
-rw-r--r--  m4/pcre2.m4 | 333
-rw-r--r--  sass/stylesheets/sass/components/_toc.scss | 2
-rw-r--r--  sass/stylesheets/sass/screen.scss | 7
-rw-r--r--  sonar-project.properties | 5
-rw-r--r--  src/go/cmd/zabbix_agent2/external.go | 13
-rw-r--r--  src/go/cmd/zabbix_agent2/zabbix_agent2.go | 3
-rw-r--r--  src/go/cmd/zabbix_web_service/zabbix_web_service.go | 2
-rw-r--r--  src/go/conf/zabbix_agent2.d/plugins.d/mongodb.conf | 41
-rw-r--r--  src/go/go.mod | 3
-rw-r--r--  src/go/go.sum | 17
-rw-r--r--  src/go/internal/agent/scheduler/manager.go | 3
-rw-r--r--  src/go/pkg/version/version.go | 14
-rw-r--r--  src/go/plugins/external/broker.go | 3
-rw-r--r--  src/go/plugins/external/plugin.go | 1
-rw-r--r--  src/go/plugins/mongodb/README.md | 143
-rw-r--r--  src/go/plugins/mongodb/config.go | 57
-rw-r--r--  src/go/plugins/mongodb/conn.go | 293
-rw-r--r--  src/go/plugins/mongodb/handler_collection_stats.go | 54
-rw-r--r--  src/go/plugins/mongodb/handler_collection_stats_test.go | 84
-rw-r--r--  src/go/plugins/mongodb/handler_collections_discovery.go | 69
-rw-r--r--  src/go/plugins/mongodb/handler_collections_discovery_test.go | 78
-rw-r--r--  src/go/plugins/mongodb/handler_collections_usage.go | 55
-rw-r--r--  src/go/plugins/mongodb/handler_collections_usage_test.go | 70
-rw-r--r--  src/go/plugins/mongodb/handler_config_discovery.go | 96
-rw-r--r--  src/go/plugins/mongodb/handler_connPool_stats.go | 54
-rw-r--r--  src/go/plugins/mongodb/handler_connPool_stats_test.go | 90
-rw-r--r--  src/go/plugins/mongodb/handler_database_stats.go | 54
-rw-r--r--  src/go/plugins/mongodb/handler_database_stats_test.go | 91
-rw-r--r--  src/go/plugins/mongodb/handler_databases_discovery.go | 55
-rw-r--r--  src/go/plugins/mongodb/handler_databases_discovery_test.go | 60
-rw-r--r--  src/go/plugins/mongodb/handler_jumbo_chunks.go | 36
-rw-r--r--  src/go/plugins/mongodb/handler_oplog_stats.go | 92
-rw-r--r--  src/go/plugins/mongodb/handler_oplog_stats_test.go | 95
-rw-r--r--  src/go/plugins/mongodb/handler_ping.go | 35
-rw-r--r--  src/go/plugins/mongodb/handler_replset_config.go | 58
-rw-r--r--  src/go/plugins/mongodb/handler_replset_config_test.go | 71
-rw-r--r--  src/go/plugins/mongodb/handler_replset_status.go | 162
-rw-r--r--  src/go/plugins/mongodb/handler_server_status.go | 58
-rw-r--r--  src/go/plugins/mongodb/handler_server_status_test.go | 71
-rw-r--r--  src/go/plugins/mongodb/handler_shards_discovery.go | 87
-rw-r--r--  src/go/plugins/mongodb/metrics.go | 159
-rw-r--r--  src/go/plugins/mongodb/mockconn.go | 190
-rw-r--r--  src/go/plugins/mongodb/mongodb.go | 114
-rw-r--r--  src/go/plugins/mongodb/testdata/collStats.json | 1
-rw-r--r--  src/go/plugins/mongodb/testdata/connPoolStats.json | 1
-rw-r--r--  src/go/plugins/mongodb/testdata/dbStats.json | 1
-rw-r--r--  src/go/plugins/mongodb/testdata/replSetGetConfig.json | 1
-rw-r--r--  src/go/plugins/mongodb/testdata/serverStatus.json | 1
-rw-r--r--  src/go/plugins/mongodb/testdata/top.json | 1
-rw-r--r--  src/go/plugins/plugins_darwin.go | 1
-rw-r--r--  src/go/plugins/plugins_linux.go | 1
-rw-r--r--  src/go/plugins/plugins_windows.go | 1
-rw-r--r--  src/libs/zbxcommon/comms.c | 92
-rw-r--r--  src/libs/zbxcommon/str.c | 106
-rw-r--r--  src/libs/zbxdbhigh/db.c | 5
-rw-r--r--  src/libs/zbxdbupgrade/dbupgrade_6000.c | 150
-rw-r--r--  src/libs/zbxembed/console.c | 2
-rw-r--r--  src/libs/zbxembed/embed.c | 130
-rw-r--r--  src/libs/zbxembed/embed.h | 2
-rw-r--r--  src/libs/zbxembed/global.c | 4
-rw-r--r--  src/libs/zbxembed/httprequest.c | 10
-rw-r--r--  src/libs/zbxembed/zabbix.c | 2
-rw-r--r--  src/libs/zbxeval/parse.c | 8
-rw-r--r--  src/zabbix_java/src/com/zabbix/gateway/GeneralInformation.java | 2
-rw-r--r--  src/zabbix_js/Makefile.am | 6
-rw-r--r--  src/zabbix_proxy/Makefile.am | 1
-rw-r--r--  src/zabbix_server/Makefile.am | 1
-rw-r--r--  src/zabbix_server/ha/ha_manager.c | 14
-rw-r--r--  src/zabbix_server/poller/checks_simple_vmware.c | 6
-rw-r--r--  src/zabbix_server/trapper/Makefile.am | 3
-rw-r--r--  src/zabbix_server/trapper/trapper.c | 92
-rw-r--r--  src/zabbix_server/vmware/vmware.c | 26
-rw-r--r--  templates/app/iis_agent/README.md | 12
-rw-r--r--  templates/app/iis_agent/template_app_iis_agent.yaml | 26
-rw-r--r--  templates/app/iis_agent_active/README.md | 12
-rw-r--r--  templates/app/iis_agent_active/template_app_iis_agent_active.yaml | 26
-rw-r--r--  templates/app/ntp_service/README.md | 4
-rw-r--r--  templates/app/ntp_service/template_app_ntp_service.yaml | 6
-rw-r--r--  templates/app/pfsense_snmp/README.md | 78
-rw-r--r--  templates/app/pfsense_snmp/template_app_pfsense_snmp.yaml | 1389
-rw-r--r--  templates/db/mongodb/README.md | 2
-rw-r--r--  templates/db/mongodb/template_db_mongodb.yaml | 8
-rw-r--r--  templates/db/mongodb_cluster/README.md | 9
-rw-r--r--  templates/db/mongodb_cluster/template_db_mongodb_cluster.yaml | 15
-rw-r--r--  templates/db/mssql_odbc/README.md | 2
-rw-r--r--  templates/db/mssql_odbc/template_db_mssql_odbc.yaml | 3
-rw-r--r--  templates/media/express.ms/media_express_ms.yaml | 22
-rw-r--r--  templates/media/mattermost/README.md | 6
-rw-r--r--  templates/media/msteams/media_msteams.yaml | 4
-rw-r--r--  templates/media/slack/README.md | 10
-rw-r--r--  templates/module/windows_agent/README.md | 2
-rw-r--r--  templates/module/windows_agent/template_module_windows_agent.yaml | 4
-rw-r--r--  templates/module/windows_agent_active/README.md | 2
-rw-r--r--  templates/module/windows_agent_active/template_module_windows_agent_active.yaml | 4
-rw-r--r--  templates/os/freebsd/template_os_freebsd.yaml | 4
-rw-r--r--  templates/os/windows_agent/README.md | 2
-rw-r--r--  templates/os/windows_agent/template_os_windows_agent.yaml | 4
-rw-r--r--  templates/os/windows_agent_active/README.md | 2
-rw-r--r--  templates/os/windows_agent_active/template_os_windows_agent_active.yaml | 4
-rw-r--r--  templates/san/hpe_msa2040_http/README.md | 240
-rw-r--r--  templates/san/hpe_msa2040_http/template_san_hpe_msa2040_http.yaml | 4417
-rw-r--r--  templates/san/hpe_msa2060_http/README.md | 250
-rw-r--r--  templates/san/hpe_msa2060_http/template_san_hpe_msa2060_http.yaml | 4559
-rw-r--r--  templates/san/hpe_primera_http/README.md | 189
-rw-r--r--  templates/san/hpe_primera_http/template_san_hpe_primera_http.yaml | 4681
-rw-r--r--  tests/libs/zbxeval/zbx_eval_parse_expression.yaml | 17
-rw-r--r--  ui/app/controllers/CControllerPopupGeneric.php | 2
-rw-r--r--  ui/app/partials/configuration.filter.items.php | 6
-rw-r--r--  ui/app/partials/configuration.host.edit.html.php | 14
-rw-r--r--  ui/app/partials/js/configuration.host.edit.html.js.php | 2
-rw-r--r--  ui/app/partials/layout.htmlpage.header.php | 2
-rw-r--r--  ui/app/partials/monitoring.latest.filter.php | 9
-rw-r--r--  ui/app/partials/scheduledreport.formgrid.html.php | 2
-rw-r--r--  ui/app/views/administration.token.edit.php | 2
-rw-r--r--  ui/app/views/administration.user.edit.php | 2
-rw-r--r--  ui/app/views/administration.user.list.php | 2
-rw-r--r--  ui/app/views/administration.user.token.edit.php | 5
-rw-r--r--  ui/app/views/monitoring.widget.item.view.php | 14
-rw-r--r--  ui/app/views/popup.condition.common.php | 34
-rw-r--r--  ui/app/views/popup.generic.php | 8
-rw-r--r--  ui/app/views/popup.import.compare.php | 4
-rw-r--r--  ui/app/views/slareport.list.php | 4
-rw-r--r--  ui/assets/styles/blue-theme.css | 16
-rw-r--r--  ui/assets/styles/dark-theme.css | 16
-rw-r--r--  ui/assets/styles/hc-dark.css | 16
-rw-r--r--  ui/assets/styles/hc-light.css | 16
-rw-r--r--  ui/chart4.php | 4
-rw-r--r--  ui/hostinventoriesoverview.php | 4
-rw-r--r--  ui/include/classes/core/CConfigFile.php | 4
-rw-r--r--  ui/include/classes/core/ZBase.php | 6
-rw-r--r--  ui/include/classes/data/CItemData.php | 2
-rw-r--r--  ui/include/classes/db/OracleDbBackend.php | 2
-rw-r--r--  ui/include/classes/graphdraw/CGraphDraw.php | 23
-rw-r--r--  ui/include/classes/graphdraw/CLineGraphDraw.php | 180
-rw-r--r--  ui/include/classes/graphdraw/CPieGraphDraw.php | 28
-rw-r--r--  ui/include/classes/helpers/CArrayHelper.php | 10
-rw-r--r--  ui/include/classes/ldap/CLdap.php | 2
-rw-r--r--  ui/include/classes/macros/CMacrosResolverGeneral.php | 4
-rw-r--r--  ui/include/classes/server/CZabbixServer.php | 2
-rw-r--r--  ui/include/classes/validators/CApiInputValidator.php | 6
-rw-r--r--  ui/include/classes/widgets/CWidgetHelper.php | 12
-rw-r--r--  ui/include/classes/widgets/views/widget.item.form.view.php | 4
-rw-r--r--  ui/include/config.inc.php | 2
-rw-r--r--  ui/include/defines.inc.php | 4
-rw-r--r--  ui/include/draw.inc.php | 4
-rw-r--r--  ui/include/func.inc.php | 11
-rw-r--r--  ui/include/graphs.inc.php | 7
-rw-r--r--  ui/include/images.inc.php | 14
-rw-r--r--  ui/include/items.inc.php | 56
-rw-r--r--  ui/include/views/configuration.graph.edit.php | 18
-rw-r--r--  ui/include/views/configuration.graph.list.php | 4
-rw-r--r--  ui/include/views/configuration.host.prototype.edit.php | 4
-rw-r--r--  ui/include/views/configuration.httpconf.list.php | 4
-rw-r--r--  ui/include/views/configuration.item.edit.php | 2
-rw-r--r--  ui/include/views/configuration.template.edit.php | 2
-rw-r--r--  ui/include/views/configuration.triggers.list.php | 4
-rw-r--r--  ui/include/views/js/configuration.host.prototype.edit.js.php | 6
-rw-r--r--  ui/include/views/js/monitoring.sysmaps.js.php | 2
-rw-r--r--  ui/js/class.notifications.js | 2
-rw-r--r--  ui/locale/bg/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/ca/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/cs/LC_MESSAGES/frontend.po | 173
-rw-r--r--  ui/locale/de/LC_MESSAGES/frontend.po | 14
-rw-r--r--  ui/locale/el/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/en/LC_MESSAGES/frontend.pot | 4
-rw-r--r--  ui/locale/en_GB/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/es/LC_MESSAGES/frontend.po | 70
-rw-r--r--  ui/locale/fa/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/fi/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/fr/LC_MESSAGES/frontend.po | 10
-rw-r--r--  ui/locale/he/LC_MESSAGES/frontend.po | 52
-rw-r--r--  ui/locale/hu/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/id/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/it/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/ja/LC_MESSAGES/frontend.po | 22
-rw-r--r--  ui/locale/ka/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/ko/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/lt/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/lv/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/nb_NO/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/nl/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/pl/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/pt_BR/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/pt_PT/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/ro/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/ru/LC_MESSAGES/frontend.po | 14
-rw-r--r--  ui/locale/sk/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/sv_SE/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/tr/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/uk/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/vi/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/zh_CN/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/locale/zh_TW/LC_MESSAGES/frontend.po | 4
-rw-r--r--  ui/report2.php | 2
-rw-r--r--  ui/tests/api_json/testAPIInfo.php | 2
-rw-r--r--  ui/tests/include/CIntegrationTest.php | 57
-rw-r--r--  ui/tests/include/helpers/CDBHelper.php | 110
-rw-r--r--  ui/tests/include/web/CElement.php | 18
-rw-r--r--  ui/tests/integration/testAgentItems.php | 23
-rw-r--r--  ui/tests/integration/testGoAgentDataCollection.php | 23
-rw-r--r--  ui/tests/integration/testItemState.php | 5
-rw-r--r--  ui/tests/selenium/testFormSetup.php | 2
-rw-r--r--  ui/tests/selenium/testPageAdministrationGeneralModules.php | 2
-rw-r--r--  ui/tests/selenium/testUrlParameters.php | 4
-rw-r--r--  ui/tests/templates/zabbix.conf.php | 2
220 files changed, 16451 insertions, 5295 deletions
diff --git a/.github/workflows/sonarcloud-others.yml b/.github/workflows/sonarcloud-others.yml
new file mode 100644
index 00000000000..787c918767a
--- /dev/null
+++ b/.github/workflows/sonarcloud-others.yml
@@ -0,0 +1,26 @@
+name: SonarCloud Others Build
+
+on:
+  push:
+    branches:
+      - 'master'
+      - 'release/*'
+  pull_request:
+    types: [opened, synchronize, reopened]
+
+jobs:
+  SonarCloud-others:
+    name: SonarCloud-others
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0  # Shallow clones should be disabled for a better relevancy of analysis
+      - name: SonarCloud Scan
+        uses: SonarSource/sonarcloud-github-action@master
+        with:
+          args: >
+            -Dsonar.sources=ui/
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}  # Needed to get PR information, if any
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml
new file mode 100644
index 00000000000..477445f0e25
--- /dev/null
+++ b/.github/workflows/sonarcloud.yml
@@ -0,0 +1,65 @@
+name: SonarCloud C Build
+
+on:
+  push:
+    branches:
+      - 'master'
+      - 'release/*'
+
+jobs:
+
+  SonarCloud-C:
+    name: SonarCloud-C
+    runs-on: ubuntu-latest
+    env:
+      SONAR_SCANNER_VERSION: 4.7.0.2747
+      SONAR_SERVER_URL: "https://sonarcloud.io"
+      BUILD_WRAPPER_OUT_DIR: $GITHUB_WORKSPACE/build_wrapper_output_directory  # Directory where build-wrapper output will be placed
+    steps:
+
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0  # Shallow clones should be disabled for a better relevancy of analysis
+
+      - name: Set up JDK 11
+        uses: actions/setup-java@v1
+        with:
+          java-version: 11
+
+      - name: Download and set up sonar-scanner
+        env:
+          SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
+        run: |
+          mkdir -p $HOME/.sonar
+          curl -sSLo $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }}
+          unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/
+          echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> $GITHUB_PATH
+
+
+      - name: Download and compile Zabbix
+        run: |
+          sudo apt-get install libsnmp-dev snmp libopenipmi-dev libevent-dev libcurl4-openssl-dev
+          cd $GITHUB_WORKSPACE
+          sh bootstrap.sh
+          ./configure --enable-server --enable-agent --with-postgresql --enable-ipv6 --with-net-snmp --with-libcurl --with-libxml2 --with-openipmi --enable-proxy --enable-agent2 --enable-java --prefix=$(pwd) --with-openssl
+          make
+          make clean
+
+      - name: Download and set up build-wrapper
+        env:
+          BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
+        run: |
+          curl -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}
+          unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/
+          echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH
+
+      - name: Run build-wrapper
+        run: |
+          build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} make clean all
+
+      - name: Run sonar-scanner
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+        run: |
+          sonar-scanner --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}"
diff --git a/ChangeLog b/ChangeLog
index 7d5692dd9e4..4f617168b2f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,42 @@
+Changes for 6.0.6rc1
+
+New features:
+..F....... [ZBXNEXT-7769] added support of PHP 8.1 to LDAP authentication (gcalenko)
+..F....... [ZBXNEXT-7722] fixed runtime errors when using Zabbix with PHP 8.1 (agriscenko)
+...G.....T [ZBXNEXT-6891] removed mongodb implementation, updated template to work with new plugin (esneiders, ychukina)
+.........T [ZBXNEXT-7632] added template for HPE Primera (vkhaliev)
+.........T [ZBXNEXT-7630] added templates for HPE MSA 2060 and MSA 2040 (drasihov)
+
+Bug fixes:
+..F.....S. [ZBX-20994] fixed access limitation for Oracle DB to list tables accessible to the user (asestakovs)
+.......PS. [ZBX-21123] fixed crash when VMware VC was not available at the first moment when starting zabbix_server (MVekslers)
+........S. [ZBX-21044] fixed javascript preprocessing output conversion to utf-8 (kprutkovs)
+..F....... [ZBX-20534] fixed popup layout for importing when horizontal scroll was not visible (rlataria)
+..F....... [ZBX-21185] fixed pattern selector when item name contains square brackets (miks)
+..F....... [ZBX-20669] fixed focus on fields when clicking on labels (esekace)
+....I..... [ZBX-21064] rewrote pcre.m4 and pcre2.m4 file (yurii)
+.........T [ZBX-20029] fixed deprecated keys in IIS by Zabbix agent templates (drasihov)
+.D........ [ZBX-18035] updated README.md for Slack and Mattermost media types (drasihov)
+.........T [ZBX-21199] fixed space utilization items in HPE MSA 2040 and 2060 templates (drasihov)
+....I..... [ZBX-20861] fixed incorrect naming of web scenario items after upgrade (vso)
+..F....... [ZBX-20847] fixed inability to attach any host groups after removing already attached ones (agriscenko)
+A.F....... [ZBX-20233] fixed handling of empty configuration parameters $ZBX_SERVER and $ZBX_SERVER_PORT the same as if they were not defined (Sasha)
+..F....... [ZBX-21163] fixed "Host inventory overview" page behavior when selected host group was removed (Sasha)
+..F....... [ZBX-20854] fixed missing headers in Columns field in Top hosts widget configuration form (dpetra)
+.........T [ZBX-21143] changed API URL in Express.ms media type to a new version (drasihov)
+........S. [ZBX-20914] fixed wrong calculation of calculated item formulas (akozlovs)
+.........T [ZBX-20875] fixed filter condition for job discovery rule in MSSQL by ODBC template (drasihov)
+.........T [ZBX-21146] fixed item key in NTP service template (drasihov)
+..F....... [ZBX-20565] fixed visibility of placeholder text in host prototype edit form (kkuzmina)
+.........T [ZBX-20985] fixed notifications summary for MS Teams media type (drasihov)
+.......PS. [ZBX-21137] fixed VMware collector crash related to datastore state disconnected from all HVs (MVekslers)
+.........T [ZBX-20628] removed system metrics from PFSense template (vkhaliev)
+.........T [ZBX-21136] fixed value type for checksum of /etc/passwd item in FreeBSD template (vkhaliev)
+.........T [ZBX-20903] removed duplicate in Windows services discovery filter regexp (drasihov)
+..F....... [ZBX-18652] fixed data overview discrepancy between host location top and left (agriscenko)
+........S. [ZBX-21111] fixed HA manager stopping (kprutkovs)
+
+--------------------------------------------------------------------------------
Changes for 6.0.5
6.0.5rc1 was released as 6.0.5 without any changes
@@ -105,7 +144,7 @@ New features:
A.F....... [ZBXNEXT-7478] fixed optional parameters before required issue (acikuns, ashubin)
Bug fixes:
-A......... [ZBX-20681] fixed undefined index error when accessing module page with guest user (rlataria)
+..F....... [ZBX-20681] fixed undefined index error when accessing module page with guest user (rlataria)
A.F.....S. [ZBX-20650] fixed performance issue with importing templates and hosts (Sasha)
.......... [ZBX-20747] fixed service algorithm update in 6.0 database upgrade patch (wiper)
A.F....... [ZBX-19118] made Host interface field non-required for HTTP agent items (jfreibergs)
@@ -1905,6 +1944,23 @@ A......... [ZBX-17955] fixed "medias" parameter named inconsistency in user.crea
.......PS. [ZBX-17548] don't store text items with history 0 in proxy history (wiper)
--------------------------------------------------------------------------------
+Changes for 5.0.25rc1
+
+Bug fixes:
+..F.....S. [ZBX-20994] fixed access limitation for Oracle DB to list tables accessible to the user (asestakovs)
+.......PS. [ZBX-21123] fixed crash when VMware VC was not available at the first moment when starting zabbix_server (MVekslers)
+.........T [ZBX-20029] fixed deprecated keys in IIS by Zabbix agent templates (drasihov)
+.D........ [ZBX-18035] updated README.md for Slack and Mattermost media types (drasihov)
+..F....... [ZBX-21163] fixed "Host inventory overview" page behavior when selected host group was removed (Sasha)
+.........T [ZBX-21143] changed API URL in Express.ms media type to a new version (drasihov)
+.........T [ZBX-20875] fixed filter condition for job discovery rule in MSSQL by ODBC template (drasihov)
+.......PS. [ZBX-21137] fixed VMware collector crash related to datastore state disconnected from all HVs (MVekslers)
+.........T [ZBX-20985] fixed notifications summary for MS Teams media type (drasihov)
+.........T [ZBX-20628] removed system metrics from PFSense template (vkhaliev)
+.........T [ZBX-20903] removed duplicate in Windows services discovery filter regexp (drasihov)
+..F....... [ZBX-18652] fixed data overview discrepancy between host location top and left (agriscenko)
+
+--------------------------------------------------------------------------------
Changes for 5.0.24
5.0.24rc1 was released as 5.0.24 without any changes
@@ -1967,7 +2023,7 @@ A.F....... [ZBXNEXT-7485] fixed debug_backtrace function usage for PHP8 (acikuns
.........T [ZBXNEXT-7493] added garbage collector and memory pool discovery to generic Java template (vkhaliev)
Bug fixes:
-A......... [ZBX-20681] fixed undefined index error when accessing module page with guest user (rlataria)
+..F....... [ZBX-20681] fixed undefined index error when accessing module page with guest user (rlataria)
.......PS. [ZBX-20518] fixed lastaccess update during proxy throttling (dgoloscapov)
...G...... [ZBX-20634] fixed handling of multiline FTP response (ssimonenko)
...G...... [ZBX-20528] fixed issue with pgsql.custom.query JSON response failing for Zabbix agent 2 (esneiders)
@@ -4405,6 +4461,16 @@ A.F.I...S. [ZBXNEXT-4853,ZBXNEXT-517] added ability to send email messages in HT
..F.....S. [ZBXNEXT-1282,ZBXNEXT-4730] added changes to introduce regex based matching for auto-registration (vasilijs, viktors)
--------------------------------------------------------------------------------
+Changes for 4.0.42rc1
+
+New features:
+A......PS. [ZBXNEXT-7694] added "utf8mb3" character set support for MySQL database (Sasha)
+
+Bug fixes:
+.......PS. [ZBX-21123] fixed crash when VMware VC was not available at the first moment when starting zabbix_server (MVekslers)
+.......PS. [ZBX-21137] fixed VMware collector crash related to datastore state disconnected from all HVs (MVekslers)
+
+--------------------------------------------------------------------------------
Changes for 4.0.41
4.0.41rc1 was released as 4.0.41 without any changes
diff --git a/ChangeLog.d/bugfix/ZBX-20105 b/ChangeLog.d/bugfix/ZBX-20105
new file mode 100644
index 00000000000..1a01268aeb3
--- /dev/null
+++ b/ChangeLog.d/bugfix/ZBX-20105
@@ -0,0 +1 @@
+...GI..PS. [ZBX-20105] removed bogus dependency on libxml2 from agent (yurii)
diff --git a/ChangeLog.d/bugfix/ZBX-20965 b/ChangeLog.d/bugfix/ZBX-20965
new file mode 100644
index 00000000000..08dbc1423b1
--- /dev/null
+++ b/ChangeLog.d/bugfix/ZBX-20965
@@ -0,0 +1 @@
+..F....... [ZBX-20965] fixed positioning of change indicator in Item value widget (dpetra)
diff --git a/ChangeLog.d/bugfix/ZBX-21066 b/ChangeLog.d/bugfix/ZBX-21066
new file mode 100644
index 00000000000..f035e7f06b8
--- /dev/null
+++ b/ChangeLog.d/bugfix/ZBX-21066
@@ -0,0 +1 @@
+....I..... [ZBX-21066] fixed libopenssl.m4 when dealing with lib/64 in openssl 3 (yurii)
diff --git a/build.xml b/build.xml
index ce9d638eb7f..9fab079dd34 100644
--- a/build.xml
+++ b/build.xml
@@ -17,6 +17,7 @@
<target name="create-frontend-configs">
<property name="dbtype" value="${DBTYPE}"/>
<property name="dbhost" value="${DBHOST}"/>
+ <property name="dbport" value="${DBPORT}"/>
<property name="dbname" value="${DBNAME}"/>
<property name="dbuser" value="${DBUSER}"/>
<property name="dbpassword" value="${DBPASSWORD}"/>
@@ -27,6 +28,7 @@
<copy overwrite="true" file="ui/tests/templates/zabbix.conf.php" tofile="ui/conf/zabbix.conf.php"/>
<replace file="ui/conf/zabbix.conf.php" token="{DBTYPE}" value="${dbtype}"/>
<replace file="ui/conf/zabbix.conf.php" token="{DBHOST}" value="${dbhost}"/>
+ <replace file="ui/conf/zabbix.conf.php" token="'{DBPORT}'" value="${dbport}"/>
<replace file="ui/conf/zabbix.conf.php" token="{DBNAME}" value="${dbname}"/>
<replace file="ui/conf/zabbix.conf.php" token="{DBUSER}" value="${dbuser}"/>
<replace file="ui/conf/zabbix.conf.php" token="{DBPASSWORD}" value="${dbpassword}"/>
@@ -150,6 +152,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="--set ON_ERROR_STOP=1"/>
<arg line="-c 'create database ${dbname} ENCODING = UTF8 TEMPLATE = template0'"/>
@@ -158,6 +161,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -167,6 +171,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -176,6 +181,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="--set ON_ERROR_STOP=1"/>
<arg line="${dbname}"/>
@@ -188,6 +194,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="--set ON_ERROR_STOP=1"/>
<arg line="-c 'drop database if exists ${dbname}'"/>
@@ -200,6 +207,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="--set ON_ERROR_STOP=1"/>
<arg line="-c 'drop database if exists ${dbname}_proxy'"/>
@@ -208,6 +216,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="--set ON_ERROR_STOP=1"/>
<arg line="-c 'create database ${dbname}_proxy ENCODING = UTF8 TEMPLATE = template0'"/>
@@ -216,6 +225,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -228,6 +238,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -240,6 +251,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -252,6 +264,7 @@
<env key="PGPASSWORD" value="${dbpassword}"/>
<arg line="-q"/>
<arg line="-h '${dbhost}'"/>
+ <arg line="-p '${dbport}'"/>
<arg line="-U '${dbuser}'"/>
<arg line="-1"/>
<arg line="--set ON_ERROR_STOP=1"/>
@@ -290,24 +303,28 @@
<exec executable="mysql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="--execute='create database ${dbname} character set utf8mb4 collate utf8mb4_bin'"/>
</exec>
<exec executable="mysql" input="database/mysql/schema.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
<exec executable="mysql" input="database/mysql/images.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
<exec executable="mysql" input="database/mysql/data.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
@@ -318,6 +335,7 @@
<exec executable="mysql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="--execute='drop database if exists ${dbname}'"/>
</exec>
@@ -328,18 +346,21 @@
<exec executable="mysql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="--execute='drop database if exists ${dbname}_proxy'"/>
</exec>
<exec executable="mysql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="--execute='create database ${dbname}_proxy character set utf8mb4 collate utf8mb4_bin'"/>
</exec>
<exec executable="mysql" input="database/mysql/schema.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}_proxy"/>
</exec>
@@ -349,6 +370,7 @@
<exec executable="mysql" input="ui/tests/selenium/data/data_test.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
@@ -358,6 +380,7 @@
<exec executable="mysql" input="ui/tests/api_json/data/data_test.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
@@ -367,6 +390,7 @@
<exec executable="mysql" input="ui/tests/integration/data/data_test.sql" failonerror="on">
<env key="MYSQL_PWD" value="${dbpassword}"/>
<arg line="--host=${dbhost}"/>
+ <arg line="--port=${dbport}"/>
<arg line="--user=${dbuser}"/>
<arg line="${dbname}"/>
</exec>
diff --git a/configure.ac b/configure.ac
index 1a3e517ab2d..dc5da59a5d3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@ dnl
dnl Process this file with autoconf to produce a configure script.
-AC_INIT([Zabbix],[6.0.5])
+AC_INIT([Zabbix],[6.0.6rc1])
AC_CONFIG_SRCDIR(src/zabbix_server/server.c)
AM_INIT_AUTOMAKE([subdir-objects tar-pax])
@@ -153,6 +153,9 @@ AC_CHECK_HEADERS(libperfstat.h, [], [], [
#endif
])
+dnl Kluge for building without pkg-config
+m4_ifdef([PKG_PROG_PKG_CONFIG], [], [ AC_DEFUN([PKG_PROG_PKG_CONFIG], []) ])
+
dnl *****************************************************************
dnl * *
dnl * Checks for libraries *
@@ -1320,6 +1323,10 @@ AC_ARG_ENABLE(static-libs,[ --enable-static-libs Build statically linked bin
*) AC_MSG_ERROR([bad value ${enableval} for --enable-static-libs]) ;;
esac])
+if test "x#static_linking" = "xyes" && test "x$static_linking_libs" = "xyes"; then
+ AC_MSG_ERROR([cannot use --static and --static-libs at the same time])
+fi
+
AC_ARG_ENABLE(server,[ --enable-server Turn on build of Zabbix server],
[case "${enableval}" in
yes) server=yes ;;
@@ -1543,9 +1550,12 @@ if test "x$server" = "xyes" || test "x$proxy" = "xyes"; then
have_libxml2="yes"
fi
fi
-
- LDFLAGS="$LDFLAGS $LIBXML2_LDFLAGS"
- LIBS="$LIBS $LIBXML2_LIBS"
+
+ SERVER_LDFLAGS="$SERVER_LDFLAGS $LIBXML2_LDFLAGS"
+ SERVER_LIBS="$SERVER_LIBS $LIBXML2_LIBS"
+
+ PROXY_LDFLAGS="$PROXY_LDFLAGS $LIBXML2_LDFLAGS"
+ PROXY_LIBS="$PROXY_LIBS $LIBXML2_LIBS"
AC_SUBST(LIBXML2_CFLAGS)
@@ -1871,36 +1881,22 @@ ZBXJS_LDFLAGS="$ZBXJS_LDFLAGS $LIBCURL_LDFLAGS"
ZBXJS_LIBS="$ZBXJS_LIBS $LIBCURL_LIBS"
if test "x$server" = "xyes" || test "x$proxy" = "xyes" || test "x$agent" = "xyes" || test "x$agent2" = "xyes"; then
-dnl Check for libpcre or libpcre2, used by Zabbix for regular expressions
-dnl Must check for both pcre and pcre2 to check if both were selected by the user and produce an error in that case
-LIBPCRE_CHECK_CONFIG([no])
-if test "x$enable_static_libs" != "xyes" || test "x$found_libpcre" != "xyes"; then
-LIBPCRE2_CHECK_CONFIG([no])
-fi
-if test "x$want_libpcre2" = "xyes"; then
- if test "x$want_libpcre" = "xyes"; then
- AC_MSG_ERROR([Cannot use both pcre and pcre2 at the same time!])
- fi
+LIBPCRE_CHECK_CONFIG([flags-only])
+LIBPCRE2_CHECK_CONFIG([flags-only])
- if test "x$found_libpcre2" != "xyes"; then
- AC_MSG_ERROR([Unable to use libpcre2 (libpcre2 check failed)])
+if test "x$want_libpcre2" = "xno"; then # default to old pcre
+ if test "x$want_libpcre" = "xno"; then
+ AC_MSG_NOTICE([using old pcre library by default])
fi
- CFLAGS="$CFLAGS $LIBPCRE2_CFLAGS"
- LDFLAGS="$LDFLAGS $LIBPCRE2_LDFLAGS"
- if test "x$ARCH" = "xosx"; then
- LIBS="$LIBPCRE2_LIBS $LIBS"
- else
- LIBS="$LIBS $LIBPCRE2_LIBS"
- fi
+ LIBPCRE_CHECK_CONFIG([mandatory])
- AC_DEFINE([HAVE_PCRE2_H], 1, [Define to 1 if you have the 'libpcre2' library (-lpcre2-8)])
- have_pcre2=1
-else
if test "x$found_libpcre" != "xyes"; then
- AC_MSG_ERROR([Unable to use libpcre (libpcre check failed)])
+ AC_MSG_ERROR([unable to use libpcre (libpcre check failed)])
fi
+ LIBPCRE_CHECK_CONFIG([no])
+
CFLAGS="$CFLAGS $LIBPCRE_CFLAGS"
LDFLAGS="$LDFLAGS $LIBPCRE_LDFLAGS"
if test "x$ARCH" = "xosx"; then
@@ -1911,6 +1907,31 @@ else
AC_DEFINE([HAVE_PCRE_H], 1, [Define to 1 if you have the 'libpcre' library (-lpcre)])
have_pcre=1
+else
+ if test "x$want_libpcre" != "xno"; then
+ AC_MSG_ERROR([cannot use both pcre and pcre2 at the same time])
+ fi
+
+ LIBPCRE2_CHECK_CONFIG([mandatory])
+
+ if test "x$found_libpcre2" != "xyes"; then
+ AC_MSG_ERROR([unable to use libpcre2 (libpcre2 check failed)])
+ fi
+
+ CFLAGS="$CFLAGS $LIBPCRE2_CFLAGS"
+ LDFLAGS="$LDFLAGS $LIBPCRE2_LDFLAGS"
+ if test "x$ARCH" = "xosx"; then
+ LIBS="$LIBPCRE2_LIBS $LIBS"
+ else
+ LIBS="$LIBS $LIBPCRE2_LIBS"
+ fi
+
+ AC_DEFINE([HAVE_PCRE2_H], 1, [Define to 1 if you have the 'libpcre2' library (-lpcre2-8)])
+ have_pcre2=1
+fi
+
+if test "x$have_pcre" != "x1" && test "x$have_pcre2" != "x1"; then
+ AC_MSG_ERROR([cannot build with libpcre or libpcre2])
fi
fi
@@ -2219,6 +2240,14 @@ if test "x$DB_CFLAGS" != "x"; then
echo " database: ${DB_CFLAGS}"
fi
+if test "x$LIBPCRE_CFLAGS" != "x"; then
+ echo " libpcre: ${LIBPCRE_CFLAGS}"
+fi
+
+if test "x$LIBPCRE2_CFLAGS" != "x"; then
+ echo " libpcre2: ${LIBPCRE2_CFLAGS}"
+fi
+
if test "x$LIBXML2_CFLAGS" != "x"; then
echo " libXML2: ${LIBXML2_CFLAGS}"
fi
diff --git a/create/src/schema.tmpl b/create/src/schema.tmpl
index 3877a92200f..5f1760bb4eb 100644
--- a/create/src/schema.tmpl
+++ b/create/src/schema.tmpl
@@ -1936,4 +1936,4 @@ TABLE|dbversion|dbversionid|
FIELD |dbversionid |t_id | |NOT NULL |0
FIELD |mandatory |t_integer |'0' |NOT NULL |
FIELD |optional |t_integer |'0' |NOT NULL |
-ROW |1 |6000000 |6000002
+ROW |1 |6000000 |6000004
diff --git a/include/common.h b/include/common.h
index 513d00d84bc..5173482c53d 100644
--- a/include/common.h
+++ b/include/common.h
@@ -1253,11 +1253,6 @@ void xml_free_data_dyn(char **data);
char *xml_escape_dyn(const char *data);
void xml_escape_xpath(char **data);
-int comms_parse_response(char *xml, char *host, size_t host_len, char *key, size_t key_len,
- char *data, size_t data_len, char *lastlogsize, size_t lastlogsize_len,
- char *timestamp, size_t timestamp_len, char *source, size_t source_len,
- char *severity, size_t severity_len);
-
/* misc functions */
int is_ip6(const char *ip);
int is_ip4(const char *ip);
@@ -1360,8 +1355,6 @@ int zbx_is_utf8(const char *text);
#define ZBX_UTF8_REPLACE_CHAR '?'
void zbx_replace_invalid_utf8(char *text);
-int zbx_cesu8_to_utf8(const char *cesu8, char **utf8);
-
void dos2unix(char *str);
int str2uint64(const char *str, const char *suffixes, zbx_uint64_t *value);
double str2double(const char *str);
diff --git a/include/version.h b/include/version.h
index 8b180e036cf..6edf9e94b94 100644
--- a/include/version.h
+++ b/include/version.h
@@ -27,7 +27,7 @@
#define ZABBIX_REVDATE "30 May 2022"
#define ZABBIX_VERSION_MAJOR 6
#define ZABBIX_VERSION_MINOR 0
-#define ZABBIX_VERSION_PATCH 5
+#define ZABBIX_VERSION_PATCH 6
#ifndef ZABBIX_VERSION_REVISION
# define ZABBIX_VERSION_REVISION {ZABBIX_REVISION}
#endif
@@ -36,7 +36,7 @@
# define ZABBIX_VERSION_RC_NUM {ZABBIX_RC_NUM}
# endif
#endif
-#define ZABBIX_VERSION_RC ""
+#define ZABBIX_VERSION_RC "rc1"
#define ZABBIX_VERSION ZBX_STR(ZABBIX_VERSION_MAJOR) "." ZBX_STR(ZABBIX_VERSION_MINOR) "." \
ZBX_STR(ZABBIX_VERSION_PATCH) ZABBIX_VERSION_RC
#define ZABBIX_REVISION ZBX_STR(ZABBIX_VERSION_REVISION)
diff --git a/m4/libmodbus.m4 b/m4/libmodbus.m4
index ad03bd6044d..7700f71813b 100644
--- a/m4/libmodbus.m4
+++ b/m4/libmodbus.m4
@@ -91,16 +91,18 @@ AC_HELP_STRING([--with-libmodbus@<:@=DIR@:>@],[use MODBUS package @<:@default=no
if test "x$want_libmodbus" = "xyes"; then
AC_REQUIRE([PKG_PROG_PKG_CONFIG])
- PKG_PROG_PKG_CONFIG()
+ m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG()], [:])
test -z "$PKG_CONFIG" && AC_MSG_ERROR([Not found pkg-config library])
m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
if test "x$_libmodbus_dir" = "xno"; then
- PKG_CHECK_EXISTS(libmodbus,[
- LIBMODBUS_LIBS=`$PKG_CONFIG --libs libmodbus`
- ],[
- AC_MSG_ERROR([Not found libmodbus package])
- ])
+ m4_ifdef([PKG_CHECK_EXISTS], [
+ PKG_CHECK_EXISTS(libmodbus,[
+ LIBMODBUS_LIBS=`$PKG_CONFIG --libs libmodbus`
+ ],[
+ AC_MSG_ERROR([Not found libmodbus package])
+ ])
+ ], [:])
LIBMODBUS_CFLAGS=`$PKG_CONFIG --cflags libmodbus`
LIBMODBUS_LDFLAGS=""
_libmodbus_version=`$PKG_CONFIG --modversion libmodbus`
diff --git a/m4/libopenssl.m4 b/m4/libopenssl.m4
index fedac99dc3c..d2ab7939d4c 100644
--- a/m4/libopenssl.m4
+++ b/m4/libopenssl.m4
@@ -108,8 +108,10 @@ AC_HELP_STRING([--with-openssl@<:@=DIR@:>@],[use OpenSSL package @<:@default=no@
if test "x$enable_static_libs" = "xyes"; then
test "x$static_linking_support" = "xno" -a -z "$_libopenssl_dir_lib" && AC_MSG_ERROR(["OpenSSL: Compiler not support statically linked libs from default folders"])
AC_REQUIRE([PKG_PROG_PKG_CONFIG])
- PKG_PROG_PKG_CONFIG()
+ m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG()], [:])
test -z "$PKG_CONFIG" -a -z "$_libopenssl_dir_lib" && AC_MSG_ERROR([Not found pkg-config library])
+ _libopenssl_dir_lib_64="$_libopenssl_dir_lib/64"
+ test -d "$_libopenssl_dir_lib_64" && _libopenssl_dir_lib="$_libopenssl_dir_lib_64"
m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
fi
@@ -137,6 +139,8 @@ AC_HELP_STRING([--with-openssl@<:@=DIR@:>@],[use OpenSSL package @<:@default=no@
if test -d $_libopenssl_dir/lib64; then
OPENSSL_LDFLAGS=-L$_libopenssl_dir/lib64
+ elif test -d $_libopenssl_dir/lib/64; then
+ OPENSSL_LDFLAGS=-L$_libopenssl_dir/lib/64
else
OPENSSL_LDFLAGS=-L$_libopenssl_dir/lib
fi
@@ -160,13 +164,16 @@ AC_HELP_STRING([--with-openssl@<:@=DIR@:>@],[use OpenSSL package @<:@default=no@
OPENSSL_LIBS="$_libopenssl_dir_lib/libssl.a $_libopenssl_dir_lib/libcrypto.a"
elif test "x$enable_static_libs" = "xyes"; then
if test -z "$_libopenssl_dir_lib"; then
- PKG_CHECK_EXISTS(openssl,[
- OPENSSL_LIBS=`$PKG_CONFIG --static --libs openssl`
- ],[
- AC_MSG_ERROR([Not found openssl package])
- ])
+ m4_ifdef([PKG_CHECK_EXISTS], [
+ PKG_CHECK_EXISTS(openssl,[
+ OPENSSL_LIBS=`$PKG_CONFIG --static --libs openssl`
+ ],[
+ AC_MSG_ERROR([Not found openssl package])
+ ])
+ ], [:])
else
- AC_RUN_LOG([PKG_CONFIG_LIBDIR="$_libopenssl_dir_lib/pkgconfig" $PKG_CONFIG --exists --print-errors openssl]) || AC_MSG_ERROR(["Not found openssl package in $_libopenssl_dir/lib/pkgconfig"])
+ AC_RUN_LOG([PKG_CONFIG_LIBDIR="$_libopenssl_dir_lib/pkgconfig" $PKG_CONFIG --exists --print-errors openssl]) ||
+ AC_MSG_ERROR(["Not found openssl package in $_libopenssl_dir_lib/pkgconfig"])
OPENSSL_LIBS=`PKG_CONFIG_LIBDIR="$_libopenssl_dir_lib/pkgconfig" $PKG_CONFIG --static --libs openssl`
test -z "$OPENSSL_LIBS" && OPENSSL_LIBS=`PKG_CONFIG_LIBDIR="$_libopenssl_dir_lib/pkgconfig" $PKG_CONFIG --libs openssl`
fi
diff --git a/m4/libxml2.m4 b/m4/libxml2.m4
index cd6f79c126d..6e3714a31a6 100644
--- a/m4/libxml2.m4
+++ b/m4/libxml2.m4
@@ -50,7 +50,7 @@ AC_HELP_STRING([--with-libxml2@<:@=ARG@:>@],
if test "$want_libxml2" = "yes"; then
AC_REQUIRE([PKG_PROG_PKG_CONFIG])
- PKG_PROG_PKG_CONFIG()
+ m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG()], [:])
if test -x "$PKG_CONFIG"; then
diff --git a/m4/pcre.m4 b/m4/pcre.m4
index 05469c2882f..f387a227b73 100644
--- a/m4/pcre.m4
+++ b/m4/pcre.m4
@@ -28,140 +28,243 @@ found_libpcre="yes")
AC_DEFUN([LIBPCRE_CHECK_CONFIG],
[
+ want_libpcre=no
+ found_libpcre=no
+ libpcre_dir=""
+ libpcre_include_dir=""
+ libpcre_lib_dir=""
+
+ #
+ # process --with-* flags
+ #
+
AC_ARG_WITH([libpcre],[
If you want to specify libpcre installation directories:
AC_HELP_STRING([--with-libpcre@<:@=DIR@:>@], [use libpcre from given base install directory (DIR), default is to search through a number of common places for the libpcre files.])],
- [
- if test "$withval" = "yes"; then
- want_libpcre=yes
- if test -f /usr/local/include/pcre.h; then
- withval="/usr/local"
- else
- withval="/usr"
- fi
- else
- want_libpcre=no
- _libpcre_dir_lib="$withval/lib"
+ [
+ if test "$withval" != "no"; then
+ want_libpcre=yes
+ if test "$withval" != "yes"; then
+ libpcre_dir="$withval"
fi
- _libpcre_dir="$withval"
- test "x$withval" = "xyes" && withval=/usr
- LIBPCRE_CFLAGS="-I$withval/include"
- LIBPCRE_LDFLAGS="-L$withval/lib"
- _libpcre_dir_set="yes"
- ]
- )
-
- AC_ARG_WITH([libpcre-include],
- AC_HELP_STRING([--with-libpcre-include@<:@=DIR@:>@],
- [use libpcre include headers from given path.]
- ),
- [
- LIBPCRE_CFLAGS="-I$withval"
- _libpcre_dir_set="yes"
- ]
- )
-
- AC_ARG_WITH([libpcre-lib],
- AC_HELP_STRING([--with-libpcre-lib@<:@=DIR@:>@],
- [use libpcre libraries from given path.]
- ),
- [
- _libpcre_dir="$withval"
- _libpcre_dir_lib="$withval"
- LIBPCRE_LDFLAGS="-L$withval"
- _libpcre_dir_set="yes"
- ]
- )
-
- if test "x$enable_static_libs" = "xyes"; then
+ fi
+ ])
+
+ AC_ARG_WITH([libpcre-include], AC_HELP_STRING([--with-libpcre-include@<:@=DIR@:>@], [use libpcre include headers from given path.]), [
+ want_libpcre="yes"
+ libpcre_include_dir="$withval"
+ if ! test -d "$libpcre_include_dir"; then
+ AC_MSG_ERROR([cannot find $libpcre_include_dir directory])
+ fi
+ if ! test -f "$libpcre_include_dir/pcre.h"; then
+ AC_MSG_ERROR([cannot find $libpcre_include_dir/pcre.h])
+ fi
+ ])
+
+ AC_ARG_WITH([libpcre-lib], AC_HELP_STRING([--with-libpcre-lib@<:@=DIR@:>@], [use libpcre libraries from given path.]), [
+ want_libpcre="yes"
+ libpcre_lib_dir="$withval"
+ if ! test -d "$libpcre_lib_dir"; then
+ AC_MSG_ERROR([cannot find $libpcre_lib_dir directory])
+ fi
+ ])
+
+
+ #
+ # find actual compiler flags and include paths
+ #
+
+ if test "$1" != "flags-only"; then
AC_REQUIRE([PKG_PROG_PKG_CONFIG])
- PKG_PROG_PKG_CONFIG()
- test -z "$PKG_CONFIG" -a -z "$_libpcre_dir_lib" && AC_MSG_ERROR([Not found pkg-config library])
- m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
- fi
+ m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG()], [:])
- AC_MSG_CHECKING(for libpcre support)
+ if test -n "$PKG_CONFIG"; then
+ #
+ # got pkg-config, use that
+ #
- LIBPCRE_LIBS="-lpcre"
+ m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
- if test "x$enable_static" = "xyes"; then
- LIBPCRE_LIBS=" $LIBPCRE_LIBS -lpthread"
- elif test "x$enable_static_libs" = "xyes" -a -z "$PKG_CONFIG"; then
- LIBPCRE_LIBS="$_libpcre_dir_lib/libpcre.a"
- elif test "x$enable_static_libs" = "xyes"; then
+ if test -n "$libpcre_lib_dir"; then
+ export PKG_CONFIG_LIBDIR="$libpcre_lib_dir/pkgconfig"
+ elif test -n "$libpcre_dir"; then
+ export PKG_CONFIG_LIBDIR="$libpcre_dir/lib/pkgconfig"
+ fi
- test "x$static_linking_support" = "xno" -a -z "$_libpcre_dir_lib" && AC_MSG_ERROR(["Compiler not support statically linked libs from default folders"])
+ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors libpcre]) || {
+ AC_MSG_ERROR([cannot find pkg-config package for libpcre])
+ }
- if test -z "$_libpcre_dir_lib"; then
- PKG_CHECK_EXISTS(libpcre,[
- LIBPCRE_LIBS=`$PKG_CONFIG --static --libs libpcre`
- ],[
- AC_MSG_ERROR([Not found libpcre package])
- ])
- else
- AC_RUN_LOG([PKG_CONFIG_LIBDIR="$_libpcre_dir_lib/pkgconfig" $PKG_CONFIG --exists --print-errors libpcre]) || AC_MSG_ERROR(["Not found libpcre package in $_libpcre_dir/lib/pkgconfig"])
- LIBPCRE_LIBS=`PKG_CONFIG_LIBDIR="$_libpcre_dir_lib/pkgconfig" $PKG_CONFIG --static --libs libpcre`
- test -z "$LIBPCRE_LIBS" && LIBPCRE_LIBS=`PKG_CONFIG_LIBDIR="$_libpcre_dir_lib/pkgconfig" $PKG_CONFIG --libs libpcre`
- fi
+ if test -n "$libpcre_include_dir"; then
+ LIBPCRE_CFLAGS="-I$libpcre_include_dir"
+ else
+ LIBPCRE_CFLAGS=`$PKG_CONFIG --cflags libpcre`
+ fi
+
+ LIBPCRE_LDFLAGS=`$PKG_CONFIG --libs-only-L libpcre`
+ LIBPCRE_LIBS=`$PKG_CONFIG --libs-only-l libpcre`
+
+ unset PKG_CONFIG_LIBDIR
- if test "x$static_linking_support" = "xno"; then
- LIBPCRE_LIBS=`echo "$LIBPCRE_LIBS"|sed "s|-lpcre|$_libpcre_dir_lib/libpcre.a|g"`
+ found_libpcre="yes"
else
- LIBPCRE_LIBS=`echo "$LIBPCRE_LIBS"|sed "s/-lpcre/${static_linking_support}static -lpcre ${static_linking_support}dynamic/g"`
+ #
+ # no pkg-config, trying to guess
+ #
+
+ AC_MSG_WARN([proceeding without pkg-config])
+
+ LIBPCRE_LIBS="-lpcre"
+
+ if test -n "$libpcre_dir"; then
+ #
+ # directories are given explicitly
+ #
+
+ if test -n "$libpcre_include_dir"; then
+ LIBPCRE_CFLAGS="-I$libpcre_include_dir"
+ else
+ if test -f "$libpcre_dir/include/pcre.h"; then
+ LIBPCRE_CFLAGS="-I$libpcre_dir/include"
+ else
+ AC_MSG_ERROR([cannot find $libpcre_dir/include/pcre.h])
+ fi
+ fi
+
+ if test -n "$libpcre_lib_dir"; then
+ LIBPCRE_LDFLAGS="-L$libpcre_lib_dir"
+ else
+ if test -d "$libpcre_dir/lib"; then
+ LIBPCRE_LDFLAGS="-L$libpcre_dir/lib"
+ else
+ AC_MSG_ERROR([cannot find $libpcre_dir/lib])
+ fi
+ fi
+
+ found_libpcre="yes"
+ elif test -n "$libpcre_include_dir"; then
+ LIBPCRE_CFLAGS="-I$libpcre_include_dir"
+
+ if test -n "$libpcre_lib_dir"; then
+ LIBPCRE_LDFLAGS="-L$libpcre_lib_dir"
+ fi
+
+ found_libpcre="yes"
+ elif test -n "$libpcre_lib_dir"; then
+ LIBPCRE_LDFLAGS="-L$libpcre_lib_dir"
+
+ found_libpcre="yes"
+ else
+ #
+ # search default directories
+ #
+
+ if test -f /usr/include/pcre.h; then
+ found_libpcre="yes"
+ elif test -f /usr/local/include/pcre.h; then
+ LIBPCRE_CFLAGS="-I/usr/local/include"
+ LIBPCRE_LDFLAGS="-L/usr/local/lib"
+
+ found_libpcre="yes"
+ elif test -f /usr/pkg/include/pcre.h; then
+ LIBPCRE_CFLAGS="-I/usr/pkg/include"
+ LIBPCRE_LDFLAGS="-L/usr/pkg/lib"
+ LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS -Wl,-R/usr/pkg/lib"
+
+ found_libpcre="yes"
+ elif test -f /opt/csw/include/pcre.h; then
+ LIBPCRE_CFLAGS="-I/opt/csw/include"
+ LIBPCRE_LDFLAGS="-L/opt/csw/lib"
+
+ if $(echo "$CFLAGS"|grep -q -- "-m64") ; then
+ LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS/64 -Wl,-R/opt/csw/lib/64"
+ else
+ LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS -Wl,-R/opt/csw/lib"
+ fi
+
+ found_libpcre="yes"
+ else
+ found_libpcre="no"
+ fi
+ fi
fi
- fi
- if test -n "$_libpcre_dir_set" -o -f /usr/include/pcre.h; then
- found_libpcre="yes"
- elif test -f /usr/local/include/pcre.h; then
- LIBPCRE_CFLAGS="-I/usr/local/include"
- LIBPCRE_LDFLAGS="-L/usr/local/lib"
- found_libpcre="yes"
- elif test -f /usr/pkg/include/pcre.h; then
- LIBPCRE_CFLAGS="-I/usr/pkg/include"
- LIBPCRE_LDFLAGS="-L/usr/pkg/lib"
- LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS -Wl,-R/usr/pkg/lib"
- found_libpcre="yes"
- elif test -f /opt/csw/include/pcre.h; then
- LIBPCRE_CFLAGS="-I/opt/csw/include"
- LIBPCRE_LDFLAGS="-L/opt/csw/lib"
- if $(echo "$CFLAGS"|grep -q -- "-m64") ; then
- LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS/64 -Wl,-R/opt/csw/lib/64"
- else
- LIBPCRE_LDFLAGS="$LIBPCRE_LDFLAGS -Wl,-R/opt/csw/lib"
+
+ #
+ # process --enable-static and --enable-static-libs flags
+ #
+
+ if test "x$enable_static" = "xyes"; then
+ LIBPCRE_LIBS=" $LIBPCRE_LIBS -lpthread"
+ elif test "x$enable_static_libs" = "xyes"; then
+ if test "x$static_linking_support" == "xno"; then
+ AC_MSG_WARN([compiler has no direct support for static linkage])
+
+ if test -n "$libpcre_lib_dir"; then
+ if test -f "$libpcre_lib_dir/libpcre.a"; then
+ LIBPCRE_LIBS="$libpcre_lib_dir/libpcre.a"
+ else
+ AC_MSG_ERROR([cannot find $libpcre_lib_dir/libpcre.a])
+ fi
+ elif test -n "$libpcre_dir"; then
+ if test -f "$libpcre_dir/lib/libpcre.a"; then
+ LIBPCRE_LIBS="$libpcre_dir/lib/libpcre.a"
+ else
+ AC_MSG_ERROR([cannot find $libpcre_dir/lib/libpcre.a])
+ fi
+ else
+ AC_MSG_ERROR([libpcre directory must be given explicitly in this case])
+ fi
+ else
+ LIBPCRE_LIBS="$LIBPCRE_LDFLAGS ${static_linking_support}static $LIBPCRE_LIBS ${static_linking_support}dynamic"
+ LIBPCRE_LDFLAGS=""
+ fi
fi
- found_libpcre="yes"
- else
- found_libpcre="no"
- AC_MSG_RESULT(no)
- fi
- if test "x$found_libpcre" = "xyes"; then
- am_save_CFLAGS="$CFLAGS"
- am_save_LDFLAGS="$LDFLAGS"
- am_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $LIBPCRE_CFLAGS"
- LDFLAGS="$LDFLAGS $LIBPCRE_LDFLAGS"
- LIBS="$LIBS $LIBPCRE_LIBS"
+ #
+ # try building with pcre
+ #
- found_libpcre="no"
- LIBPCRE_TRY_LINK([no])
+ AC_MSG_CHECKING([for libpcre support])
- CFLAGS="$am_save_CFLAGS"
- LDFLAGS="$am_save_LDFLAGS"
- LIBS="$am_save_LIBS"
- fi
+ if test "x$found_libpcre" = "xyes"; then
+ am_save_CFLAGS="$CFLAGS"
+ am_save_LDFLAGS="$LDFLAGS"
+ am_save_LIBS="$LIBS"
- if test "x$found_libpcre" = "xyes"; then
- AC_MSG_RESULT(yes)
- else
- LIBPCRE_CFLAGS=""
- LIBPCRE_LDFLAGS=""
- LIBPCRE_LIBS=""
- fi
+ CFLAGS="$CFLAGS $LIBPCRE_CFLAGS"
+ LDFLAGS="$LDFLAGS $LIBPCRE_LDFLAGS"
+ LIBS="$LIBS $LIBPCRE_LIBS"
+
+ found_libpcre="no"
+ LIBPCRE_TRY_LINK([no])
+
+ if test "x$found_libpcre" = "xyes"; then
+ AC_MSG_RESULT(yes)
+ else
+ AC_MSG_RESULT(no)
+ if test "$1" = "mandatory"; then
+ AC_MSG_NOTICE([CFLAGS: $CFLAGS])
+ AC_MSG_NOTICE([LDFLAGS: $LDFLAGS])
+ AC_MSG_NOTICE([LIBS: $LIBS])
+ AC_MSG_ERROR([cannot build with libpcre])
+ else
+ LIBPCRE_CFLAGS=""
+ LIBPCRE_LDFLAGS=""
+ LIBPCRE_LIBS=""
+ fi
+ fi
- AC_SUBST(LIBPCRE_CFLAGS)
- AC_SUBST(LIBPCRE_LDFLAGS)
- AC_SUBST(LIBPCRE_LIBS)
+ CFLAGS="$am_save_CFLAGS"
+ LDFLAGS="$am_save_LDFLAGS"
+ LIBS="$am_save_LIBS"
+ else
+ AC_MSG_RESULT(no)
+ fi
+
+ AC_SUBST(LIBPCRE_CFLAGS)
+ AC_SUBST(LIBPCRE_LDFLAGS)
+ AC_SUBST(LIBPCRE_LIBS)
+ fi
])dnl
diff --git a/m4/pcre2.m4 b/m4/pcre2.m4
index 436b98de4ae..3b85f475eb5 100644
--- a/m4/pcre2.m4
+++ b/m4/pcre2.m4
@@ -29,140 +29,243 @@ found_libpcre2="yes")
AC_DEFUN([LIBPCRE2_CHECK_CONFIG],
[
+ want_libpcre2=no
+ found_libpcre2=no
+ libpcre2_dir=""
+ libpcre2_include_dir=""
+ libpcre2_lib_dir=""
+
+ #
+ # process --with-* flags
+ #
+
AC_ARG_WITH([libpcre2],[
If you want to specify libpcre2 installation directories:
AC_HELP_STRING([--with-libpcre2@<:@=DIR@:>@], [use libpcre2 from given base install directory (DIR), default is to search through a number of common places for the libpcre2 files.])],
- [
- if test "$withval" = "yes"; then
- want_libpcre2=yes
- if test -f /usr/local/include/pcre2.h; then
- withval="/usr/local"
- else
- withval="/usr"
- fi
- else
- want_libpcre2=no
- _libpcre2_dir_lib="$withval/lib"
+ [
+ if test "$withval" != "no"; then
+ want_libpcre2=yes
+ if test "$withval" != "yes"; then
+ libpcre2_dir="$withval"
fi
- _libpcre2_dir="$withval"
- test "x$withval" = "xyes" && withval=/usr
- LIBPCRE2_CFLAGS="-I$withval/include"
- LIBPCRE2_LDFLAGS="-L$withval/lib"
- _libpcre2_dir_set="yes"
- ]
- )
-
- AC_ARG_WITH([libpcre2-include],
- AC_HELP_STRING([--with-libpcre2-include@<:@=DIR@:>@],
- [use libpcre2 include headers from given path.]
- ),
- [
- LIBPCRE2_CFLAGS="-I$withval"
- _libpcre2_dir_set="yes"
- ]
- )
-
- AC_ARG_WITH([libpcre2-lib],
- AC_HELP_STRING([--with-libpcre2-lib@<:@=DIR@:>@],
- [use libpcre2 libraries from given path.]
- ),
- [
- _libpcre2_dir="$withval"
- _libpcre2_dir_lib="$withval"
- LIBPCRE2_LDFLAGS="-L$withval"
- _libpcre2_dir_set="yes"
- ]
- )
-
- if test "x$enable_static_libs" = "xyes"; then
+ fi
+ ])
+
+ AC_ARG_WITH([libpcre2-include], AC_HELP_STRING([--with-libpcre2-include@<:@=DIR@:>@], [use libpcre2 include headers from given path.]), [
+ want_libpcre2="yes"
+ libpcre2_include_dir="$withval"
+ if ! test -d "$libpcre2_include_dir"; then
+ AC_MSG_ERROR([cannot find $libpcre2_include_dir directory])
+ fi
+ if ! test -f "$libpcre2_include_dir/pcre2.h"; then
+ AC_MSG_ERROR([cannot find $libpcre2_include_dir/pcre2.h])
+ fi
+ ])
+
+ AC_ARG_WITH([libpcre2-lib], AC_HELP_STRING([--with-libpcre2-lib@<:@=DIR@:>@], [use libpcre2 libraries from given path.]), [
+ want_libpcre2="yes"
+ libpcre2_lib_dir="$withval"
+ if ! test -d "$libpcre2_lib_dir"; then
+ AC_MSG_ERROR([cannot find $libpcre2_lib_dir directory])
+ fi
+ ])
+
+
+ #
+ # find actual compiler flags and include paths
+ #
+
+ if test "$1" != "flags-only"; then
AC_REQUIRE([PKG_PROG_PKG_CONFIG])
- PKG_PROG_PKG_CONFIG()
- test -z "$PKG_CONFIG" -a -z "$_libpcre2_dir_lib" && AC_MSG_ERROR([Not found pkg-config library])
- m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
- fi
+ m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG()], [:])
- AC_MSG_CHECKING(for libpcre2 support)
+ if test -n "$PKG_CONFIG"; then
+ #
+ # got pkg-config, use that
+ #
- LIBPCRE2_LIBS="-lpcre2-8"
+ m4_pattern_allow([^PKG_CONFIG_LIBDIR$])
- if test "x$enable_static" = "xyes"; then
- LIBPCRE2_LIBS=" $LIBPCRE2_LIBS -lpthread"
- elif test "x$enable_static_libs" = "xyes" -a -z "$PKG_CONFIG"; then
- LIBPCRE2_LIBS="$_libpcre2_dir_lib/libpcre2.a"
- elif test "x$enable_static_libs" = "xyes"; then
+ if test -n "$libpcre2_lib_dir"; then
+ export PKG_CONFIG_LIBDIR="$libpcre2_lib_dir/pkgconfig"
+ elif test -n "$libpcre2_dir"; then
+ export PKG_CONFIG_LIBDIR="$libpcre2_dir/lib/pkgconfig"
+ fi
- test "x$static_linking_support" = "xno" -a -z "$_libpcre2_dir_lib" && AC_MSG_ERROR(["Compiler not support statically linked libs from default folders"])
+ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors libpcre2-8]) || {
+ AC_MSG_ERROR([cannot find pkg-config package for libpcre2])
+ }
- if test -z "$_libpcre2_dir_lib"; then
- PKG_CHECK_EXISTS(libpcre2,[
- LIBPCRE2_LIBS=`$PKG_CONFIG --static --libs libpcre2`
- ],[
- AC_MSG_ERROR([Not found libpcre2 package])
- ])
- else
- AC_RUN_LOG([PKG_CONFIG_LIBDIR="$_libpcre2_dir_lib/pkgconfig" $PKG_CONFIG --exists --print-errors libpcre2]) || AC_MSG_ERROR(["Not found libpcre2 package in $_libpcre2_dir/lib/pkgconfig"])
- LIBPCRE2_LIBS=`PKG_CONFIG_LIBDIR="$_libpcre2_dir_lib/pkgconfig" $PKG_CONFIG --static --libs libpcre2`
- test -z "$LIBPCRE2_LIBS" && LIBPCRE2_LIBS=`PKG_CONFIG_LIBDIR="$_libpcre2_dir_lib/pkgconfig" $PKG_CONFIG --libs libpcre2`
- fi
+ if test -n "$libpcre2_include_dir"; then
+ LIBPCRE2_CFLAGS="-I$libpcre2_include_dir"
+ else
+ LIBPCRE2_CFLAGS=`$PKG_CONFIG --cflags libpcre2-8`
+ fi
+
+ LIBPCRE2_LDFLAGS=`$PKG_CONFIG --libs-only-L libpcre2-8`
+ LIBPCRE2_LIBS=`$PKG_CONFIG --libs-only-l libpcre2-8`
+
+ unset PKG_CONFIG_LIBDIR
- if test "x$static_linking_support" = "xno"; then
- LIBPCRE2_LIBS=`echo "$LIBPCRE2_LIBS"|sed "s|-lpcre2-8|$_libpcre2_dir_lib/libpcre2.a|g"`
+ found_libpcre2="yes"
else
- LIBPCRE2_LIBS=`echo "$LIBPCRE2_LIBS"|sed "s/-lpcre2-8/${static_linking_support}static -lpcre2-8 ${static_linking_support}dynamic/g"`
+ #
+ # no pkg-config, trying to guess
+ #
+
+ AC_MSG_WARN([proceeding without pkg-config])
+
+ LIBPCRE2_LIBS="-lpcre2-8"
+
+ if test -n "$libpcre2_dir"; then
+ #
+ # directories are given explicitly
+ #
+
+ if test -n "$libpcre2_include_dir"; then
+ LIBPCRE2_CFLAGS="-I$libpcre2_include_dir"
+ else
+ if test -f "$libpcre2_dir/include/pcre2.h"; then
+ LIBPCRE2_CFLAGS="-I$libpcre2_dir/include"
+ else
+ AC_MSG_ERROR([cannot find $libpcre2_dir/include/pcre2.h])
+ fi
+ fi
+
+ if test -n "$libpcre2_lib_dir"; then
+ LIBPCRE2_LDFLAGS="-L$libpcre2_lib_dir"
+ else
+ if test -d "$libpcre2_dir/lib"; then
+ LIBPCRE2_LDFLAGS="-L$libpcre2_dir/lib"
+ else
+ AC_MSG_ERROR([cannot find $libpcre2_dir/lib])
+ fi
+ fi
+
+ found_libpcre2="yes"
+ elif test -n "$libpcre2_include_dir"; then
+ LIBPCRE2_CFLAGS="-I$libpcre2_include_dir"
+
+ if test -n "$libpcre2_lib_dir"; then
+ LIBPCRE2_LDFLAGS="-L$libpcre2_lib_dir"
+ fi
+
+ found_libpcre2="yes"
+ elif test -n "$libpcre2_lib_dir"; then
+ LIBPCRE2_LDFLAGS="-L$libpcre2_lib_dir"
+
+ found_libpcre2="yes"
+ else
+ #
+ # search default directories
+ #
+
+ if test -f /usr/include/pcre2.h; then
+ found_libpcre2="yes"
+ elif test -f /usr/local/include/pcre2.h; then
+ LIBPCRE2_CFLAGS="-I/usr/local/include"
+ LIBPCRE2_LDFLAGS="-L/usr/local/lib"
+
+ found_libpcre2="yes"
+ elif test -f /usr/pkg/include/pcre2.h; then
+ LIBPCRE2_CFLAGS="-I/usr/pkg/include"
+ LIBPCRE2_LDFLAGS="-L/usr/pkg/lib"
+ LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS -Wl,-R/usr/pkg/lib"
+
+ found_libpcre2="yes"
+ elif test -f /opt/csw/include/pcre2.h; then
+ LIBPCRE2_CFLAGS="-I/opt/csw/include"
+ LIBPCRE2_LDFLAGS="-L/opt/csw/lib"
+
+ if $(echo "$CFLAGS"|grep -q -- "-m64") ; then
+ LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS/64 -Wl,-R/opt/csw/lib/64"
+ else
+ LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS -Wl,-R/opt/csw/lib"
+ fi
+
+ found_libpcre2="yes"
+ else
+ found_libpcre2="no"
+ fi
+ fi
fi
- fi
- if test -n "$_libpcre2_dir_set" -o -f /usr/include/pcre2.h; then
- found_libpcre2="yes"
- elif test -f /usr/local/include/pcre2.h; then
- LIBPCRE2_CFLAGS="-I/usr/local/include"
- LIBPCRE2_LDFLAGS="-L/usr/local/lib"
- found_libpcre2="yes"
- elif test -f /usr/pkg/include/pcre2.h; then
- LIBPCRE2_CFLAGS="-I/usr/pkg/include"
- LIBPCRE2_LDFLAGS="-L/usr/pkg/lib"
- LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS -Wl,-R/usr/pkg/lib"
- found_libpcre2="yes"
- elif test -f /opt/csw/include/pcre2.h; then
- LIBPCRE2_CFLAGS="-I/opt/csw/include"
- LIBPCRE2_LDFLAGS="-L/opt/csw/lib"
- if $(echo "$CFLAGS"|grep -q -- "-m64") ; then
- LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS/64 -Wl,-R/opt/csw/lib/64"
- else
- LIBPCRE2_LDFLAGS="$LIBPCRE2_LDFLAGS -Wl,-R/opt/csw/lib"
+
+ #
+	# process --enable-static and --enable-static-libs flags
+ #
+
+ if test "x$enable_static" = "xyes"; then
+ LIBPCRE2_LIBS=" $LIBPCRE2_LIBS -lpthread"
+ elif test "x$enable_static_libs" = "xyes"; then
+	if test "x$static_linking_support" = "xno"; then
+	AC_MSG_WARN([compiler has no direct support for static linkage])
+
+ if test -n "$libpcre2_lib_dir"; then
+ if test -f "$libpcre2_lib_dir/libpcre2-8.a"; then
+ LIBPCRE2_LIBS="$libpcre2_lib_dir/libpcre2-8.a"
+ else
+ AC_MSG_ERROR([cannot find $libpcre2_lib_dir/libpcre2-8.a])
+ fi
+ elif test -n "$libpcre2_dir"; then
+ if test -f "$libpcre2_dir/lib/libpcre2-8.a"; then
+ LIBPCRE2_LIBS="$libpcre2_dir/lib/libpcre2-8.a"
+ else
+ AC_MSG_ERROR([cannot find $libpcre2_dir/lib/libpcre2-8.a])
+ fi
+ else
+ AC_MSG_ERROR([libpcre2 directory must be given explicitly in this case])
+ fi
+ else
+ LIBPCRE2_LIBS="$LIBPCRE2_LDFLAGS ${static_linking_support}static $LIBPCRE2_LIBS ${static_linking_support}dynamic"
+ LIBPCRE2_LDFLAGS=""
+ fi
fi
- found_libpcre2="yes"
- else
- found_libpcre2="no"
- AC_MSG_RESULT(no)
- fi
- if test "x$found_libpcre2" = "xyes"; then
- am_save_CFLAGS="$CFLAGS"
- am_save_LDFLAGS="$LDFLAGS"
- am_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $LIBPCRE2_CFLAGS"
- LDFLAGS="$LDFLAGS $LIBPCRE2_LDFLAGS"
- LIBS="$LIBS $LIBPCRE2_LIBS"
+ #
+ # try building with pcre2
+ #
- found_libpcre2="no"
- LIBPCRE2_TRY_LINK([no])
+ AC_MSG_CHECKING([for libpcre2 support])
- CFLAGS="$am_save_CFLAGS"
- LDFLAGS="$am_save_LDFLAGS"
- LIBS="$am_save_LIBS"
- fi
+ if test "x$found_libpcre2" = "xyes"; then
+ am_save_CFLAGS="$CFLAGS"
+ am_save_LDFLAGS="$LDFLAGS"
+ am_save_LIBS="$LIBS"
- if test "x$found_libpcre2" = "xyes"; then
- AC_MSG_RESULT(yes)
- else
- LIBPCRE2_CFLAGS=""
- LIBPCRE2_LDFLAGS=""
- LIBPCRE2_LIBS=""
- fi
+ CFLAGS="$CFLAGS $LIBPCRE2_CFLAGS"
+ LDFLAGS="$LDFLAGS $LIBPCRE2_LDFLAGS"
+ LIBS="$LIBS $LIBPCRE2_LIBS"
+
+ found_libpcre2="no"
+ LIBPCRE2_TRY_LINK([no])
+
+ if test "x$found_libpcre2" = "xyes"; then
+ AC_MSG_RESULT(yes)
+ else
+ AC_MSG_RESULT(no)
+ if test "$1" = "mandatory"; then
+ AC_MSG_NOTICE([CFLAGS: $CFLAGS])
+ AC_MSG_NOTICE([LDFLAGS: $LDFLAGS])
+ AC_MSG_NOTICE([LIBS: $LIBS])
+ AC_MSG_ERROR([cannot build with libpcre2])
+ else
+ LIBPCRE2_CFLAGS=""
+ LIBPCRE2_LDFLAGS=""
+ LIBPCRE2_LIBS=""
+ fi
+ fi
- AC_SUBST(LIBPCRE2_CFLAGS)
- AC_SUBST(LIBPCRE2_LDFLAGS)
- AC_SUBST(LIBPCRE2_LIBS)
+ CFLAGS="$am_save_CFLAGS"
+ LDFLAGS="$am_save_LDFLAGS"
+ LIBS="$am_save_LIBS"
+ else
+ AC_MSG_RESULT(no)
+ fi
+
+ AC_SUBST(LIBPCRE2_CFLAGS)
+ AC_SUBST(LIBPCRE2_LDFLAGS)
+ AC_SUBST(LIBPCRE2_LIBS)
+ fi
])dnl
diff --git a/sass/stylesheets/sass/components/_toc.scss b/sass/stylesheets/sass/components/_toc.scss
index b0c9f3fd36b..16fb0ffe0c2 100644
--- a/sass/stylesheets/sass/components/_toc.scss
+++ b/sass/stylesheets/sass/components/_toc.scss
@@ -5,7 +5,7 @@
text-overflow: ellipsis;
white-space: nowrap;
line-height: 14px;
- color: $toc-title-color;
+ color: $link-color;
border-bottom: 1px solid $table-border-color;
background: $toc-title-bg-color;
}
diff --git a/sass/stylesheets/sass/screen.scss b/sass/stylesheets/sass/screen.scss
index 3a983a8ba21..8e840aa9bf8 100644
--- a/sass/stylesheets/sass/screen.scss
+++ b/sass/stylesheets/sass/screen.scss
@@ -6464,7 +6464,7 @@ z-select,
.import-compare {
display: flex;
- max-height: calc(100vh - 190px);
+ max-height: calc(100vh - 220px);
.toc {
flex: 20%;
@@ -6478,6 +6478,11 @@ z-select,
overflow: auto;
border: 1px dashed $form-border-color;
}
+
+ .toc,
+ .diff {
+ @extend %webkit-scrollbar;
+ }
}
.list-dashed {
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 00000000000..7341546fff6
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,5 @@
+sonar.projectKey=zabbix_zabbix
+sonar.organization=zabbix
+sonar.java.binaries=target
+sonar.java.binaries=**/*.java
+sonar.exclusions=**/*.java, **/*.sql, src/zabbix_agent/zabbix_agentd.c, src/zabbix_server/server.c, src/zabbix_proxy/proxy.c, ui/tests/**/*, ui/tests/*
diff --git a/src/go/cmd/zabbix_agent2/external.go b/src/go/cmd/zabbix_agent2/external.go
index 5146295693a..e9e693d7d39 100644
--- a/src/go/cmd/zabbix_agent2/external.go
+++ b/src/go/cmd/zabbix_agent2/external.go
@@ -24,6 +24,7 @@ import (
"fmt"
"net"
"os"
+ "path/filepath"
"time"
"git.zabbix.com/ap/plugin-support/conf"
@@ -49,6 +50,10 @@ func initExternalPlugins(options *agent.AgentOptions) (string, error) {
continue
}
+ if err := checkPath(o.System.Path); err != nil {
+ return "", err
+ }
+
paths[name] = o.System.Path
}
@@ -116,6 +121,14 @@ func initExternalPlugin(name string, p *external.Plugin, options *agent.AgentOpt
return
}
+func checkPath(path string) error {
+ if !filepath.IsAbs(path) {
+ return fmt.Errorf("failed to start plugin %s, path must be absolute", path)
+ }
+
+ return nil
+}
+
func validate(p *external.Plugin, options interface{}) error {
if !comms.ImplementsConfigurator(p.Interfaces) {
return nil
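The checkPath guard added above rejects relative plugin paths before the agent attempts to launch an external plugin binary. Below is a minimal standalone sketch of the same check; the example paths are illustrative and not taken from any real configuration.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// checkPath mirrors the guard added to external.go: external plugin
// binaries must be configured with an absolute path.
func checkPath(path string) error {
	if !filepath.IsAbs(path) {
		return fmt.Errorf("failed to start plugin %s, path must be absolute", path)
	}

	return nil
}

func main() {
	// Illustrative paths: only the absolute one passes the check.
	for _, p := range []string{"/usr/local/sbin/zabbix-plugin-mongodb", "plugins/mongodb"} {
		fmt.Println(p, "->", checkPath(p))
	}
}
```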
diff --git a/src/go/cmd/zabbix_agent2/zabbix_agent2.go b/src/go/cmd/zabbix_agent2/zabbix_agent2.go
index dd6d853777d..9a24689ec86 100644
--- a/src/go/cmd/zabbix_agent2/zabbix_agent2.go
+++ b/src/go/cmd/zabbix_agent2/zabbix_agent2.go
@@ -31,6 +31,7 @@ import (
"git.zabbix.com/ap/plugin-support/conf"
"git.zabbix.com/ap/plugin-support/log"
+ "git.zabbix.com/ap/plugin-support/plugin/comms"
"zabbix.com/internal/agent"
"zabbix.com/internal/agent/keyaccess"
"zabbix.com/internal/agent/remotecontrol"
@@ -300,7 +301,7 @@ func main() {
})
if argVersion {
- version.Display()
+ version.Display([]string{fmt.Sprintf("Plugin support version %d.%d\n", comms.MajorVersion, comms.MinorVersion)})
os.Exit(0)
}
diff --git a/src/go/cmd/zabbix_web_service/zabbix_web_service.go b/src/go/cmd/zabbix_web_service/zabbix_web_service.go
index 902f0450b54..78e226cbef7 100644
--- a/src/go/cmd/zabbix_web_service/zabbix_web_service.go
+++ b/src/go/cmd/zabbix_web_service/zabbix_web_service.go
@@ -76,7 +76,7 @@ func main() {
}
if versionFlag {
- version.Display()
+ version.Display(nil)
os.Exit(0)
}
diff --git a/src/go/conf/zabbix_agent2.d/plugins.d/mongodb.conf b/src/go/conf/zabbix_agent2.d/plugins.d/mongodb.conf
deleted file mode 100644
index 7b31fc2980c..00000000000
--- a/src/go/conf/zabbix_agent2.d/plugins.d/mongodb.conf
+++ /dev/null
@@ -1,41 +0,0 @@
-### Option: Plugins.Mongo.Timeout
-# Amount of time to wait for a server to respond when first connecting and on
-# follow up operations in the session.
-#
-# Mandatory: no
-# Range: 1-30
-# Default:
-# Plugins.Mongo.Timeout=<Global timeout>
-
-### Option: Plugins.Mongo.KeepAlive
-# Time in seconds for waiting before unused connections will be closed.
-#
-# Mandatory: no
-# Range: 60-900
-# Default:
-# Plugins.Mongo.KeepAlive=300
-
-### Option: Plugins.Mongo.Sessions.*.Uri
-# Uri to connect. "*" should be replaced with a session name.
-#
-# Mandatory: no
-# Range:
-# Must matches the URI format.
-# The only supported schema is "tcp".
-# Embedded credentials will be ignored.
-# Default:
-# Plugins.Mongo.Sessions.*.Uri=
-
-### Option: Plugins.Mongo.Sessions.*.User
-# Username to send to protected MongoDB server. "*" should be replaced with a session name.
-#
-# Mandatory: no
-# Default:
-# Plugins.Mongo.Sessions.*.User=
-
-### Option: Plugins.Mongo.Sessions.*.Password
-# Password to send to protected MongoDB server. "*" should be replaced with a session name.
-#
-# Mandatory: no
-# Default:
-# Plugins.Mongo.Sessions.*.Password=
diff --git a/src/go/go.mod b/src/go/go.mod
index b8d5d3a6dea..a49e83baa4d 100644
--- a/src/go/go.mod
+++ b/src/go/go.mod
@@ -3,7 +3,7 @@ module zabbix.com
go 1.16
require (
- git.zabbix.com/ap/plugin-support v0.0.2
+ git.zabbix.com/ap/plugin-support v0.0.0-20220608100211-35b8bffd7ad0
github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5
github.com/chromedp/cdproto v0.0.0-20210104223854-2cc87dae3ee3
@@ -29,6 +29,5 @@ require (
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
- gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
gopkg.in/yaml.v2 v2.2.8 // indirect
)
diff --git a/src/go/go.sum b/src/go/go.sum
index f6ff11f256f..52f19406c56 100644
--- a/src/go/go.sum
+++ b/src/go/go.sum
@@ -1,3 +1,15 @@
+git.zabbix.com/ap/plugin-support v0.0.0-20220524072909-7233a93fe116 h1:IGbQPDh/U7UHSM0M4h2k/wdRjyO3zG8uq4Dx+gej4y8=
+git.zabbix.com/ap/plugin-support v0.0.0-20220524072909-7233a93fe116/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
+git.zabbix.com/ap/plugin-support v0.0.0-20220524082638-7fd18350f78d h1:fv+sfiIPe6U8X0T+vrpI4bBpo5kzYnp9qxNwA30Ay/U=
+git.zabbix.com/ap/plugin-support v0.0.0-20220524082638-7fd18350f78d/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
+git.zabbix.com/ap/plugin-support v0.0.0-20220525103006-9363aae69d10 h1:zRmgkxzcYbFXMmHgLQ0i3FIOUhqLKK6uCTF/Fdlbl00=
+git.zabbix.com/ap/plugin-support v0.0.0-20220525103006-9363aae69d10/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
+git.zabbix.com/ap/plugin-support v0.0.0-20220530082632-bd6f3ae15c88 h1:a/w3gm8CKYRkw7gRu6+ufdCAadmWjnh4LNHqQRG3Nu4=
+git.zabbix.com/ap/plugin-support v0.0.0-20220530082632-bd6f3ae15c88/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
+git.zabbix.com/ap/plugin-support v0.0.0-20220601115430-7e21b812be52 h1:dgk5oJtySHzjkkzfELzM4tkAQbjJO9krfj0GJQMXdS4=
+git.zabbix.com/ap/plugin-support v0.0.0-20220601115430-7e21b812be52/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
+git.zabbix.com/ap/plugin-support v0.0.0-20220608100211-35b8bffd7ad0 h1:VZuQnO95vu+83jsQaY+HGDzx+SoTf8inEZFkZhPvZtM=
+git.zabbix.com/ap/plugin-support v0.0.0-20220608100211-35b8bffd7ad0/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
git.zabbix.com/ap/plugin-support v0.0.2 h1:ce5LDuqEK4yYrD0g2BNWrSlnr33AVB3QT4G3wmjhDts=
git.zabbix.com/ap/plugin-support v0.0.2/go.mod h1:R3QzQWgpxlA+ddJNkOhsPTcGOVtrR69WS0hXIsnBurY=
github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 h1:+tu3HOoMXB7RXEINRVIpxJCT+KdYiI7LAEAUrOw3dIU=
@@ -111,11 +123,9 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -234,12 +244,9 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/src/go/internal/agent/scheduler/manager.go b/src/go/internal/agent/scheduler/manager.go
index fce6572f379..fa0c32abc0a 100644
--- a/src/go/internal/agent/scheduler/manager.go
+++ b/src/go/internal/agent/scheduler/manager.go
@@ -30,6 +30,7 @@ import (
"git.zabbix.com/ap/plugin-support/conf"
"git.zabbix.com/ap/plugin-support/log"
"git.zabbix.com/ap/plugin-support/plugin"
+ "git.zabbix.com/ap/plugin-support/plugin/comms"
"zabbix.com/internal/agent"
"zabbix.com/internal/agent/alias"
"zabbix.com/internal/agent/keyaccess"
@@ -538,6 +539,8 @@ func (m *Manager) init() {
}
func (m *Manager) Start() {
+ log.Infof("%s", comms.GetPluginVersionMessage())
+
monitor.Register(monitor.Scheduler)
go m.run()
}
diff --git a/src/go/pkg/version/version.go b/src/go/pkg/version/version.go
index 59ec4debe3a..7328d07fdfe 100644
--- a/src/go/pkg/version/version.go
+++ b/src/go/pkg/version/version.go
@@ -29,8 +29,8 @@ const (
ZABBIX_REVDATE = "30 May 2022"
ZABBIX_VERSION_MAJOR = 6
ZABBIX_VERSION_MINOR = 0
- ZABBIX_VERSION_PATCH = 5
- ZABBIX_VERSION_RC = ""
+ ZABBIX_VERSION_PATCH = 6
+ ZABBIX_VERSION_RC = "rc1"
ZABBIX_VERSION_RC_NUM = "{ZABBIX_RC_NUM}"
ZABBIX_VERSION_REVISION = "{ZABBIX_REVISION}"
copyrightMessage = "Copyright (C) 2022 Zabbix SIA\n" +
@@ -140,9 +140,15 @@ func TitleMessage() string {
return title
}
-func Display() {
+func Display(additionalMessages []string) {
fmt.Printf("%s (Zabbix) %s\n", TitleMessage(), Long())
- fmt.Printf("Revision %s %s, compilation time: %s %s\n\n", Revision(), RevDate(), CompileDate(), CompileTime())
+ fmt.Printf("Revision %s %s, compilation time: %s %s\n", Revision(), RevDate(), CompileDate(), CompileTime())
+
+ for _, msg := range additionalMessages {
+ fmt.Println(msg)
+ }
+
+ fmt.Println()
fmt.Println(CopyrightMessage())
}
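version.Display now takes an optional slice of extra lines: zabbix_agent2 uses it to print its plugin protocol version, while zabbix_web_service passes nil. A rough sketch of the new calling pattern follows; the banner text and version numbers below are placeholders, not the agent's actual output.

```go
package main

import "fmt"

// display mimics the reworked version.Display: fixed banner lines first,
// then any caller-supplied lines, then a blank line before the copyright.
func display(additionalMessages []string) {
	fmt.Println("zabbix_agent2 (Zabbix) <version>")                       // placeholder banner
	fmt.Println("Revision <rev> <date>, compilation time: <date> <time>") // placeholder banner

	for _, msg := range additionalMessages {
		fmt.Println(msg)
	}

	fmt.Println()
	fmt.Println("Copyright (C) 2022 Zabbix SIA")
}

func main() {
	// zabbix_agent2 passes its plugin protocol version; the numbers here are placeholders.
	display([]string{fmt.Sprintf("Plugin support version %d.%d", 1, 0)})

	// zabbix_web_service passes nil, so only the banner and copyright are printed.
	display(nil)
}
```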
diff --git a/src/go/plugins/external/broker.go b/src/go/plugins/external/broker.go
index ecf6f463365..df6310968e2 100644
--- a/src/go/plugins/external/broker.go
+++ b/src/go/plugins/external/broker.go
@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"net"
+ "strconv"
"time"
"git.zabbix.com/ap/plugin-support/conf"
@@ -322,7 +323,7 @@ func (b *pluginBroker) register() (*comms.RegisterResponse, error) {
Common: comms.Common{
Type: comms.RegisterRequestType,
},
- Version: comms.Version,
+ Version: strconv.Itoa(comms.MajorVersion),
},
out: make(chan interface{}),
}
diff --git a/src/go/plugins/external/plugin.go b/src/go/plugins/external/plugin.go
index 62ff260e8be..f557d62136f 100644
--- a/src/go/plugins/external/plugin.go
+++ b/src/go/plugins/external/plugin.go
@@ -59,6 +59,7 @@ func (p *Plugin) Register() (response *comms.RegisterResponse, err error) {
func (p *Plugin) ExecutePlugin() {
startLock.Lock()
defer startLock.Unlock()
+
p.cmd = exec.Command(p.Path, p.Socket, strconv.FormatBool(p.Initial))
err := p.cmd.Start()
diff --git a/src/go/plugins/mongodb/README.md b/src/go/plugins/mongodb/README.md
deleted file mode 100644
index eba29e9f4cd..00000000000
--- a/src/go/plugins/mongodb/README.md
+++ /dev/null
@@ -1,143 +0,0 @@
-# MongoDB plugin
-Provides native Zabbix solution for monitoring MongoDB servers and clusters (document-based, distributed database).
-It can monitor several MongoDB instances simultaneously, remotes or locals to the Zabbix Agent.
-The plugin keeps connections in the opened state to reduce network
-congestion, latency, CPU and memory usage. Best for use in conjunction with the official
-[MongoDB template.](https://git.zabbix.com/projects/ZBX/repos/zabbix/browse/templates/app/mongodb)
-You can extend it or create your template for your specific needs.
-
-## Requirements
-* Zabbix Agent 2
-* Go >= 1.13 (required only to build from the source)
-
-## Supported versions
-* MongoDB, versions 4.4, 4.2, 4.0 and 3.6
-
-## Installation
-Depending on your configuration you need to create a local read-only user in the admin database:
-- *STANDALONE*: for each single MongoDB node.
-- *REPLICASET*: create the user on the primary node of the replica set.
-- *SHARDING*: for each shard in your cluster (just create the user on the primary node of the replica set).
-Also, create the same user on a mongos router. It will automatically spread to config servers.
-
-```javascript
-use admin
-
-db.auth("admin", "<ADMIN_PASSWORD>")
-
-db.createUser({
- "user": "zabbix",
- "pwd": "<PASSWORD>",
- "roles": [
- { role: "readAnyDatabase", db: "admin" },
- { role: "clusterMonitor", db: "admin" },
- ]
-})
-```
-
-## Configuration
-The Zabbix Agent's configuration file is used to configure plugins.
-
-**Plugins.Mongo.KeepAlive** — Sets a time for waiting before unused connections will be closed.
-*Default value:* 300 sec.
-*Limits:* 60-900
-
-**Plugins.Mongo.Timeout** — The amount of time to wait for a server to respond when first connecting and on follow up
-operations in the session.
-*Default value:* equals the global Timeout configuration parameter.
-*Limits:* 1-30
-
-### Configuring connection
-A connection can be configured using either keys' parameters or named sessions.
-
-*Notes*:
-* It is not possible to mix configuration using named sessions and keys' parameters simultaneously.
-* You can leave any connection parameter empty, a default hard-coded value will be used in the such case:
- localhost:27017 without authentication.
-* Embedded URI credentials (userinfo) are forbidden and will be ignored. So, you can't pass the credentials by this:
-
- mongodb.ping[tcp://user:password@127.0.0.1] — WRONG
-
- The correct way is:
-
- mongodb.ping[tcp://127.0.0.1,user,password]
-
-* Currently, only TCP connections supported.
-
-Examples of valid URIs:
- - tcp://127.0.0.1:27017
- - tcp://localhost
- - localhost
-
-#### Using keys' parameters
-The common parameters for all keys are: [ConnString][,User][,Password]
-Where ConnString can be either a URI or session name.
-ConnString will be treated as a URI if no session with the given name found.
-If you use ConnString as a session name, just skip the rest of the connection parameters.
-
-#### Using named sessions
-Named sessions allow you to define specific parameters for each MongoDB instance. Currently, there are only three supported
-parameters: Uri, User and Password. It's a bit more secure way to store credentials compared to item keys or macros.
-
-E.g: suppose you have two MongoDB instances: "Prod" and "Test".
-You should add the following options to the agent configuration file:
-
- Plugins.Mongo.Sessions.Prod.Uri=tcp://192.168.1.1:27017
- Plugins.Mongo.Sessions.Prod.User=<UserForProd>
- Plugins.Mongo.Sessions.Prod.Password=<PasswordForProd>
-
- Plugins.Mongo.Sessions.Test.Uri=tcp://192.168.0.1:27017
- Plugins.Mongo.Sessions.Test.User=<UserForTest>
- Plugins.Mongo.Sessions.Test.Password=<PasswordForTest>
-
-Then you will be able to use these names as the 1st parameter (ConnString) in keys instead of URIs, e.g:
-
- mongodb.ping[Prod]
- mongodb.ping[Test]
-
-*Note*: sessions names are case-sensitive.
-
-## Supported keys
-**mongodb.collection.stats[\<commonParams\>[,database],collection]** — Returns a variety of storage statistics for a
-given collection.
-*Parameters:*
-database — database name (default: admin).
-collection (required) — collection name.
-
-**mongodb.cfg.discovery[\<commonParams\>]** — Returns a list of discovered config servers.
-
-**mongodb.collections.discovery[\<commonParams\>]** — Returns a list of discovered collections.
-
-**mongodb.collections.usage[\<commonParams\>]** — Returns usage statistics for collections.
-
-**mongodb.connpool.stats[\<commonParams\>]** — Returns information regarding the open outgoing connections from the
-current database instance to other members of the sharded cluster or replica set.
-
-**mongodb.db.stats[\<commonParams\>[,database]]** — Returns statistics reflecting a given database system’s state.
-*Parameters:*
-database — database name (default: admin).
-
-**mongodb.db.discovery[\<commonParams\>]** — Returns a list of discovered databases.
-
-**mongodb.jumbo_chunks.count[\<commonParams\>]** — Returns count of jumbo chunks.
-
-**mongodb.oplog.stats[\<commonParams\>]** — Returns a status of the replica set, using data polled from the oplog.
-
-**mongodb.ping[\<commonParams\>]** — Tests if a connection is alive or not.
-*Returns:*
-- "1" if a connection is alive.
-- "0" if a connection is broken (if there is any error presented including AUTH and configuration issues).
-
-**mongodb.rs.config[\<commonParams\>]** — Returns a current configuration of the replica set.
-
-**mongodb.rs.status[\<commonParams\>]** — Returns a replica set status from the point of view of the member
-where the method is run.
-
-**mongodb.server.status[\<commonParams\>]** — Returns a database’s state.
-
-**mongodb.sh.discovery[\<commonParams\>]** — Returns a list of discovered shards present in the cluster.
-
-## Troubleshooting
-The plugin uses Zabbix agent's logs. You can increase debugging level of Zabbix Agent if you need more details about
-what is happening.
-Set the DebugLevel configuration option to "5" (extended debugging) in order to turn on verbose log messages for the MGO package.
diff --git a/src/go/plugins/mongodb/config.go b/src/go/plugins/mongodb/config.go
deleted file mode 100644
index dbba4c6411c..00000000000
--- a/src/go/plugins/mongodb/config.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "git.zabbix.com/ap/plugin-support/conf"
- "git.zabbix.com/ap/plugin-support/plugin"
-)
-
-type PluginOptions struct {
- // Timeout is the amount of time to wait for a server to respond when
- // first connecting and on follow up operations in the session.
- Timeout int `conf:"optional,range=1:30"`
-
- // KeepAlive is a time to wait before unused connections will be closed.
- KeepAlive int `conf:"optional,range=60:900,default=300"`
-
- // Sessions stores pre-defined named sets of connections settings.
- Sessions map[string]conf.Session `conf:"optional"`
-}
-
-// Configure implements the Configurator interface.
-// Initializes configuration structures.
-func (p *Plugin) Configure(global *plugin.GlobalOptions, options interface{}) {
- if err := conf.Unmarshal(options, &p.options); err != nil {
- p.Errf("cannot unmarshal configuration options: %s", err)
- }
-
- if p.options.Timeout == 0 {
- p.options.Timeout = global.Timeout
- }
-}
-
-// Validate implements the Configurator interface.
-// Returns an error if validation of a plugin's configuration is failed.
-func (p *Plugin) Validate(options interface{}) error {
- var opts PluginOptions
-
- return conf.Unmarshal(options, &opts)
-}
diff --git a/src/go/plugins/mongodb/conn.go b/src/go/plugins/mongodb/conn.go
deleted file mode 100644
index 1e9d787d02f..00000000000
--- a/src/go/plugins/mongodb/conn.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "context"
- "sync"
- "time"
-
- "gopkg.in/mgo.v2/bson"
-
- "git.zabbix.com/ap/plugin-support/log"
- "git.zabbix.com/ap/plugin-support/uri"
- "gopkg.in/mgo.v2"
-)
-
-type MongoConn struct {
- addr string
- timeout time.Duration
- lastTimeAccess time.Time
- session *mgo.Session
-}
-
-// DB shadows *mgo.DB to returns a Database interface instead of *mgo.Database.
-func (conn *MongoConn) DB(name string) Database {
- conn.checkConnection()
-
- return &MongoDatabase{Database: conn.session.DB(name)}
-}
-
-func (conn *MongoConn) DatabaseNames() (names []string, err error) {
- conn.checkConnection()
-
- return conn.session.DatabaseNames()
-}
-
-func (conn *MongoConn) Ping() error {
- return conn.session.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "ping",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: conn.GetMaxTimeMS(),
- },
- }, nil)
-}
-
-func (conn *MongoConn) GetMaxTimeMS() int64 {
- return conn.timeout.Milliseconds()
-}
-
-// updateAccessTime updates the last time a connection was accessed.
-func (conn *MongoConn) updateAccessTime() {
- conn.lastTimeAccess = time.Now()
-}
-
-// checkConnection implements db reconnection.
-func (conn *MongoConn) checkConnection() {
- if err := conn.Ping(); err != nil {
- conn.session.Refresh()
- log.Debugf("[%s] Attempt to reconnect: %s", pluginName, conn.addr)
- }
-}
-
-// Session is an interface to access to the session struct.
-type Session interface {
- DB(name string) Database
- DatabaseNames() (names []string, err error)
- GetMaxTimeMS() int64
- Ping() error
-}
-
-// Database is an interface to access to the database struct.
-type Database interface {
- C(name string) Collection
- CollectionNames() (names []string, err error)
- Run(cmd, result interface{}) error
-}
-
-// MongoDatabase wraps a mgo.Database to embed methods in models.
-type MongoDatabase struct {
- *mgo.Database
-}
-
-// C shadows *mgo.DB to returns a Database interface instead of *mgo.Database.
-func (d *MongoDatabase) C(name string) Collection {
- return &MongoCollection{Collection: d.Database.C(name)}
-}
-
-func (d *MongoDatabase) CollectionNames() (names []string, err error) {
- return d.Database.CollectionNames()
-}
-
-// Run shadows *mgo.DB to returns a Database interface instead of *mgo.Database.
-func (d *MongoDatabase) Run(cmd, result interface{}) error {
- return d.Database.Run(cmd, result)
-}
-
-// MongoCollection wraps a mgo.Collection to embed methods in models.
-type MongoCollection struct {
- *mgo.Collection
-}
-
-// Collection is an interface to access to the collection struct.
-type Collection interface {
- Find(query interface{}) Query
-}
-
-// Find shadows *mgo.Collection to returns a Query interface instead of *mgo.Query.
-func (c *MongoCollection) Find(query interface{}) Query {
- return &MongoQuery{Query: c.Collection.Find(query)}
-}
-
-// Query is an interface to access to the query struct
-type Query interface {
- All(result interface{}) error
- Count() (n int, err error)
- Limit(n int) Query
- One(result interface{}) error
- SetMaxTime(d time.Duration) Query
- Sort(fields ...string) Query
-}
-
-// MongoQuery wraps a mgo.Query to embed methods in models.
-type MongoQuery struct {
- *mgo.Query
-}
-
-func (q *MongoQuery) Limit(n int) Query {
- q.Query.Limit(n)
- return q
-}
-
-func (q *MongoQuery) SetMaxTime(d time.Duration) Query {
- q.Query.SetMaxTime(d)
- return q
-}
-
-func (q *MongoQuery) Sort(fields ...string) Query {
- q.Query.Sort(fields...)
- return q
-}
-
-// ConnManager is thread-safe structure for manage connections.
-type ConnManager struct {
- sync.Mutex
- connMutex sync.Mutex
- connections map[uri.URI]*MongoConn
- keepAlive time.Duration
- timeout time.Duration
- Destroy context.CancelFunc
-}
-
-// NewConnManager initializes connManager structure and runs Go Routine that watches for unused connections.
-func NewConnManager(keepAlive, timeout, hkInterval time.Duration) *ConnManager {
- ctx, cancel := context.WithCancel(context.Background())
-
- connMgr := &ConnManager{
- connections: make(map[uri.URI]*MongoConn),
- keepAlive: keepAlive,
- timeout: timeout,
- Destroy: cancel, // Destroy stops originated goroutines and close connections.
- }
-
- go connMgr.housekeeper(ctx, hkInterval)
-
- return connMgr
-}
-
-// closeUnused closes each connection that has not been accessed at least within the keepalive interval.
-func (c *ConnManager) closeUnused() {
- c.connMutex.Lock()
- defer c.connMutex.Unlock()
-
- for uri, conn := range c.connections {
- if time.Since(conn.lastTimeAccess) > c.keepAlive {
- conn.session.Close()
- delete(c.connections, uri)
- log.Debugf("[%s] Closed unused connection: %s", pluginName, uri.Addr())
- }
- }
-}
-
-// closeAll closes all existed connections.
-func (c *ConnManager) closeAll() {
- c.connMutex.Lock()
- for uri, conn := range c.connections {
- conn.session.Close()
- delete(c.connections, uri)
- }
- c.connMutex.Unlock()
-}
-
-// housekeeper repeatedly checks for unused connections and close them.
-func (c *ConnManager) housekeeper(ctx context.Context, interval time.Duration) {
- ticker := time.NewTicker(interval)
-
- for {
- select {
- case <-ctx.Done():
- ticker.Stop()
- c.closeAll()
-
- return
- case <-ticker.C:
- c.closeUnused()
- }
- }
-}
-
-// create creates a new connection with given credentials.
-func (c *ConnManager) create(uri uri.URI) (*MongoConn, error) {
- c.connMutex.Lock()
- defer c.connMutex.Unlock()
-
- if _, ok := c.connections[uri]; ok {
- // Should never happen.
- panic("connection already exists")
- }
-
- session, err := mgo.DialWithInfo(&mgo.DialInfo{
- Addrs: []string{uri.Addr()},
- Direct: true,
- FailFast: false,
- Password: uri.Password(),
- PoolLimit: 1,
- Timeout: c.timeout,
- Username: uri.User(),
- })
- if err != nil {
- return nil, err
- }
-
- // Read from one of the nearest members, irrespective of it being primary or secondary.
- session.SetMode(mgo.Nearest, true)
-
- c.connections[uri] = &MongoConn{
- addr: uri.Addr(),
- timeout: c.timeout,
- lastTimeAccess: time.Now(),
- session: session,
- }
-
- log.Debugf("[%s] Created new connection: %s", pluginName, uri.Addr())
-
- return c.connections[uri], nil
-}
-
-// get returns a connection with given uri if it exists and also updates lastTimeAccess, otherwise returns nil.
-func (c *ConnManager) get(uri uri.URI) *MongoConn {
- c.connMutex.Lock()
- defer c.connMutex.Unlock()
-
- if conn, ok := c.connections[uri]; ok {
- conn.updateAccessTime()
- return conn
- }
-
- return nil
-}
-
-// GetConnection returns an existing connection or creates a new one.
-func (c *ConnManager) GetConnection(uri uri.URI) (conn *MongoConn, err error) {
- c.Lock()
- defer c.Unlock()
-
- conn = c.get(uri)
-
- if conn == nil {
- conn, err = c.create(uri)
- }
-
- return
-}
diff --git a/src/go/plugins/mongodb/handler_collection_stats.go b/src/go/plugins/mongodb/handler_collection_stats.go
deleted file mode 100644
index e1091fb48a5..00000000000
--- a/src/go/plugins/mongodb/handler_collection_stats.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// collectionStatsHandler
-// https://docs.mongodb.com/manual/reference/command/collStats/index.html
-func collectionStatsHandler(s Session, params map[string]string) (interface{}, error) {
- colStats := &bson.M{}
- err := s.DB(params["Database"]).Run(&bson.D{
- bson.DocElem{
- Name: "collStats",
- Value: params["Collection"],
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, colStats)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(colStats)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_collection_stats_test.go b/src/go/plugins/mongodb/handler_collection_stats_test.go
deleted file mode 100644
index f8792ec776b..00000000000
--- a/src/go/plugins/mongodb/handler_collection_stats_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_collectionStatsHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/collStats.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("MyDatabase")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "collStats" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- params map[string]string
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + collStats + \"command",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": "MyDatabase", "Collection": "MyCollection"},
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- {
- name: "Must catch DB.Run() error",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": mustFail},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := collectionStatsHandler(tt.args.s, tt.args.params)
-
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("collectionStatsHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("collectionStatsHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_collections_discovery.go b/src/go/plugins/mongodb/handler_collections_discovery.go
deleted file mode 100644
index eccc7f59689..00000000000
--- a/src/go/plugins/mongodb/handler_collections_discovery.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "sort"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
-)
-
-type colEntity struct {
- ColName string `json:"{#COLLECTION}"`
- DbName string `json:"{#DBNAME}"`
-}
-
-// collectionsDiscoveryHandler
-// https://docs.mongodb.com/manual/reference/command/listDatabases/
-func collectionsDiscoveryHandler(s Session, _ map[string]string) (interface{}, error) {
- dbs, err := s.DatabaseNames()
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- sort.Strings(dbs)
-
- lld := make([]colEntity, 0)
-
- for _, db := range dbs {
- collections, err := s.DB(db).CollectionNames()
-
- sort.Strings(collections)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- for _, col := range collections {
- lld = append(lld, colEntity{
- ColName: col,
- DbName: db,
- })
- }
- }
-
- jsonLLD, err := json.Marshal(lld)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonLLD), nil
-}
diff --git a/src/go/plugins/mongodb/handler_collections_discovery_test.go b/src/go/plugins/mongodb/handler_collections_discovery_test.go
deleted file mode 100644
index 5f00491a087..00000000000
--- a/src/go/plugins/mongodb/handler_collections_discovery_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package mongodb
-
-import (
- "errors"
- "reflect"
- "testing"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
-)
-
-func Test_collectionsDiscoveryHandler(t *testing.T) {
- type args struct {
- s Session
- dbs map[string][]string
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must return a list of collections",
- args: args{
- s: NewMockConn(),
- dbs: map[string][]string{
- "testdb": {"col1", "col2"},
- "local": {"startup_log"},
- "config": {"system.sessions"},
- },
- },
- want: "[{\"{#COLLECTION}\":\"system.sessions\",\"{#DBNAME}\":\"config\"},{\"{#COLLECTION}\":" +
- "\"startup_log\",\"{#DBNAME}\":\"local\"},{\"{#COLLECTION}\":\"col1\",\"{#DBNAME}\":\"testdb\"}," +
- "{\"{#COLLECTION}\":\"col2\",\"{#DBNAME}\":\"testdb\"}]",
- wantErr: nil,
- },
- {
- name: "Must catch DB.DatabaseNames() error",
- args: args{
- s: NewMockConn(),
- dbs: map[string][]string{mustFail: {}},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- {
- name: "Must catch DB.CollectionNames() error",
- args: args{
- s: NewMockConn(),
- dbs: map[string][]string{"MyDatabase": {mustFail}},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- for db, cc := range tt.args.dbs {
- tt.args.s.DB(db)
- for _, c := range cc {
- tt.args.s.DB(db).C(c)
- }
- }
-
- got, err := collectionsDiscoveryHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("collectionsDiscoveryHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("collectionsDiscoveryHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_collections_usage.go b/src/go/plugins/mongodb/handler_collections_usage.go
deleted file mode 100644
index 825a660c2ea..00000000000
--- a/src/go/plugins/mongodb/handler_collections_usage.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// collectionsUsageHandler
-// https://docs.mongodb.com/manual/reference/command/top/index.html
-func collectionsUsageHandler(s Session, _ map[string]string) (interface{}, error) {
- colUsage := &bson.M{}
-
- err := s.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "top",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, colUsage)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(colUsage)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_collections_usage_test.go b/src/go/plugins/mongodb/handler_collections_usage_test.go
deleted file mode 100644
index 478141c6a5e..00000000000
--- a/src/go/plugins/mongodb/handler_collections_usage_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_collectionsUsageHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/top.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("admin")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "top" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + top + \"command",
- args: args{
- s: mockSession,
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := collectionsUsageHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("collectionsUsageHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("collectionsUsageHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_config_discovery.go b/src/go/plugins/mongodb/handler_config_discovery.go
deleted file mode 100644
index 616ed2eca5d..00000000000
--- a/src/go/plugins/mongodb/handler_config_discovery.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "fmt"
- "net"
- "strings"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-type lldCfgEntity struct {
- ReplicaSet string `json:"{#REPLICASET}"`
- Hostname string `json:"{#HOSTNAME}"`
- MongodURI string `json:"{#MONGOD_URI}"`
-}
-
-type shardMap struct {
- Map map[string]string
-}
-
-// configDiscoveryHandler
-// https://docs.mongodb.com/manual/reference/command/getShardMap/#dbcmd.getShardMap
-func configDiscoveryHandler(s Session, params map[string]string) (interface{}, error) {
- var cfgServers shardMap
- err := s.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "getShardMap",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, &cfgServers)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- lld := make([]lldCfgEntity, 0)
-
- if servers, ok := cfgServers.Map["config"]; ok {
- var rs string
-
- hosts := servers
-
- h := strings.SplitN(hosts, "/", 2)
- if len(h) > 1 {
- rs = h[0]
- hosts = h[1]
- }
-
- for _, hostport := range strings.Split(hosts, ",") {
- host, _, err := net.SplitHostPort(hostport)
- if err != nil {
- return nil, zbxerr.ErrorCannotParseResult.Wrap(err)
- }
-
- lld = append(lld, lldCfgEntity{
- Hostname: host,
- MongodURI: fmt.Sprintf("%s://%s", uriDefaults.Scheme, hostport),
- ReplicaSet: rs,
- })
- }
- } else {
- return nil, zbxerr.ErrorCannotParseResult
- }
-
- jsonRes, err := json.Marshal(lld)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_connPool_stats.go b/src/go/plugins/mongodb/handler_connPool_stats.go
deleted file mode 100644
index 64e7a4393e6..00000000000
--- a/src/go/plugins/mongodb/handler_connPool_stats.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// connPoolStatsHandler
-//https://docs.mongodb.com/manual/reference/command/connPoolStats/#dbcmd.connPoolStats
-func connPoolStatsHandler(s Session, params map[string]string) (interface{}, error) {
- connPoolStats := &bson.M{}
- err := s.DB(params["Database"]).Run(&bson.D{
- bson.DocElem{
- Name: "connPoolStats",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, connPoolStats)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(connPoolStats)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_connPool_stats_test.go b/src/go/plugins/mongodb/handler_connPool_stats_test.go
deleted file mode 100644
index abe88dae2cb..00000000000
--- a/src/go/plugins/mongodb/handler_connPool_stats_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_connPoolStatsHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/connPoolStats.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("testdb")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "connPoolStats" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- params map[string]string
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + connPoolStats + \"command",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": "testdb"},
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- {
- name: "Must not fail on unknown db",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": "not_exists"},
- },
- want: "{\"ok\":1}",
- wantErr: nil,
- },
- {
- name: "Must catch DB.Run() error",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": mustFail},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := connPoolStatsHandler(tt.args.s, tt.args.params)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("connPoolStatsHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("connPoolStatsHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_database_stats.go b/src/go/plugins/mongodb/handler_database_stats.go
deleted file mode 100644
index 7d7e3e8a5fd..00000000000
--- a/src/go/plugins/mongodb/handler_database_stats.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// databaseStatsHandler
-// https://docs.mongodb.com/manual/reference/command/dbStats/index.html
-func databaseStatsHandler(s Session, params map[string]string) (interface{}, error) {
- dbStats := &bson.M{}
- err := s.DB(params["Database"]).Run(&bson.D{
- bson.DocElem{
- Name: "dbStats",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, dbStats)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(dbStats)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_database_stats_test.go b/src/go/plugins/mongodb/handler_database_stats_test.go
deleted file mode 100644
index 7a441ee3412..00000000000
--- a/src/go/plugins/mongodb/handler_database_stats_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_databaseStatsHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/dbStats.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("testdb")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "dbStats" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- params map[string]string
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + dbStats + \"command",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": "testdb"},
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- {
- name: "Must not fail on unknown db",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": "not_exists"},
- },
- want: "{\"ok\":1}",
- wantErr: nil,
- },
- {
- name: "Must catch DB.Run() error",
- args: args{
- s: mockSession,
- params: map[string]string{"Database": mustFail},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := databaseStatsHandler(tt.args.s, tt.args.params)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("databaseStatsHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("databaseStatsHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_databases_discovery.go b/src/go/plugins/mongodb/handler_databases_discovery.go
deleted file mode 100644
index b556930e0e5..00000000000
--- a/src/go/plugins/mongodb/handler_databases_discovery.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "sort"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
-)
-
-type dbEntity struct {
- DBName string `json:"{#DBNAME}"`
-}
-
-// databasesDiscoveryHandler
-// https://docs.mongodb.com/manual/reference/command/listDatabases/
-func databasesDiscoveryHandler(s Session, _ map[string]string) (interface{}, error) {
- dbs, err := s.DatabaseNames()
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- lld := make([]dbEntity, 0)
-
- sort.Strings(dbs)
-
- for _, db := range dbs {
- lld = append(lld, dbEntity{DBName: db})
- }
-
- jsonLLD, err := json.Marshal(lld)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonLLD), nil
-}
diff --git a/src/go/plugins/mongodb/handler_databases_discovery_test.go b/src/go/plugins/mongodb/handler_databases_discovery_test.go
deleted file mode 100644
index 803fba298d5..00000000000
--- a/src/go/plugins/mongodb/handler_databases_discovery_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package mongodb
-
-import (
- "errors"
- "reflect"
- "testing"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
-)
-
-func Test_databasesDiscoveryHandler(t *testing.T) {
- type args struct {
- s Session
- dbs []string
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must return a list of databases",
- args: args{
- s: NewMockConn(),
- dbs: []string{"testdb", "local", "config"},
- },
- want: "[{\"{#DBNAME}\":\"config\"},{\"{#DBNAME}\":\"local\"},{\"{#DBNAME}\":\"testdb\"}]",
- wantErr: nil,
- },
- {
- name: "Must catch DB.DatabaseNames() error",
- args: args{
- s: NewMockConn(),
- dbs: []string{mustFail},
- },
- want: nil,
- wantErr: zbxerr.ErrorCannotFetchData,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- for _, db := range tt.args.dbs {
- tt.args.s.DB(db)
- }
-
- got, err := databasesDiscoveryHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("databasesDiscoveryHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("databasesDiscoveryHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_jumbo_chunks.go b/src/go/plugins/mongodb/handler_jumbo_chunks.go
deleted file mode 100644
index 4860133d2d4..00000000000
--- a/src/go/plugins/mongodb/handler_jumbo_chunks.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// jumboChunksHandler
-// https://docs.mongodb.com/manual/core/sharding-data-partitioning/#indivisible-jumbo-chunks
-func jumboChunksHandler(s Session, _ map[string]string) (interface{}, error) {
- jumboChunks, err := s.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- return jumboChunks, nil
-}
diff --git a/src/go/plugins/mongodb/handler_oplog_stats.go b/src/go/plugins/mongodb/handler_oplog_stats.go
deleted file mode 100644
index 6bf7d250b1e..00000000000
--- a/src/go/plugins/mongodb/handler_oplog_stats.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "time"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2"
- "gopkg.in/mgo.v2/bson"
-)
-
-type oplogStats struct {
- TimeDiff int `json:"timediff"` // in seconds
-}
-
-type oplogEntry struct {
- Timestamp bson.MongoTimestamp `bson:"ts"`
-}
-
-const (
- oplogReplicaSet = "oplog.rs" // the capped collection that holds the oplog for Replica Set Members
- oplogMasterSlave = "oplog.$main" // oplog for the master-slave configuration
-)
-
-const (
- sortAsc = "$natural"
- sortDesc = "-$natural"
-)
-
-var oplogQuery = bson.M{"ts": bson.M{"$exists": true}}
-
-// oplogStatsHandler
-// https://docs.mongodb.com/manual/reference/method/db.getReplicationInfo/index.html
-func oplogStatsHandler(s Session, _ map[string]string) (interface{}, error) {
- var (
- stats oplogStats
- opFirst, opLast oplogEntry
- )
-
- localDb := s.DB("local")
-
- for _, collection := range []string{oplogReplicaSet, oplogMasterSlave} {
- if err := localDb.C(collection).Find(oplogQuery).
- Sort(sortAsc).Limit(1).
- SetMaxTime(time.Duration(s.GetMaxTimeMS()) * time.Millisecond).
- One(&opFirst); err != nil {
- if err == mgo.ErrNotFound {
- continue
- }
-
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- if err := localDb.C(collection).Find(oplogQuery).Sort(sortDesc).Limit(1).One(&opLast); err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- break
- }
-
-	// BSON has a special timestamp type for internal MongoDB use that is not associated with the regular Date type.
-	// This internal timestamp type is a 64-bit value where:
-	// the most significant 32 bits are a time_t value (seconds since the Unix epoch), and
-	// the least significant 32 bits are an incrementing ordinal for operations within a given second.
- stats.TimeDiff = int(opLast.Timestamp>>32 - opFirst.Timestamp>>32)
-
- jsonRes, err := json.Marshal(stats)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
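The comment above describes how the replication window is derived: each BSON timestamp packs seconds into its high 32 bits, and the handler subtracts the seconds part of the oldest oplog entry from that of the newest. A minimal, self-contained sketch of that arithmetic (plain int64 values standing in for bson.MongoTimestamp; the two constants match the test data in Test_oplogStatsHandler below):

package main

import "fmt"

// seconds extracts the time_t part (high 32 bits) of a raw BSON timestamp value.
func seconds(ts int64) int64 {
	return ts >> 32
}

func main() {
	// Raw 64-bit timestamp values of the oldest and newest oplog entries.
	var (
		opFirst int64 = 6908630134576644097
		opLast  int64 = 6925804549152178177
	)

	// Same computation as oplogStatsHandler: difference of the seconds parts.
	fmt.Printf("{\"timediff\":%d}\n", seconds(opLast)-seconds(opFirst)) // {"timediff":3998730}
}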
diff --git a/src/go/plugins/mongodb/handler_oplog_stats_test.go b/src/go/plugins/mongodb/handler_oplog_stats_test.go
deleted file mode 100644
index 07c3a586983..00000000000
--- a/src/go/plugins/mongodb/handler_oplog_stats_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package mongodb
-
-import (
- "errors"
- "reflect"
- "testing"
-
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_oplogStatsHandler(t *testing.T) {
- var (
- opFirst = map[string]int64{"ts": 6908630134576644097}
- opLast = map[string]int64{"ts": 6925804549152178177}
- )
-
- mockSession := NewMockConn()
- localDb := mockSession.DB("local")
-
- dataFunc := func(_ string, _ interface{}, sortFields ...string) ([]byte, error) {
- if len(sortFields) == 0 {
- panic("sortFields must be set")
- }
-
- switch sortFields[0] {
- case sortAsc:
- return bson.Marshal(opFirst)
-
- case sortDesc:
- return bson.Marshal(opLast)
-
- default:
- panic("unknown sort type")
- }
- }
-
- type args struct {
- s Session
- collections []string
- }
-
-	// WARN: test order is significant
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must return 0 if neither oplog.rs nor oplog.$main collection found",
- args: args{
- s: mockSession,
- collections: []string{},
- },
- want: "{\"timediff\":0}",
- wantErr: nil,
- },
- {
- name: "Must calculate timediff from oplog.$main collection",
- args: args{
- s: mockSession,
- collections: []string{oplogMasterSlave},
- },
- want: "{\"timediff\":3998730}",
- wantErr: nil,
- },
- {
- name: "Must calculate timediff from oplog.rs collection",
- args: args{
- s: mockSession,
- collections: []string{oplogReplicaSet},
- },
- want: "{\"timediff\":3998730}",
- wantErr: nil,
- },
- }
-
- for _, tt := range tests {
- for _, col := range tt.args.collections {
- localDb.C(col).Find(oplogQuery).(*MockMongoQuery).DataFunc = dataFunc
- }
-
- t.Run(tt.name, func(t *testing.T) {
- got, err := oplogStatsHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("oplogStatsHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("oplogStatsHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_ping.go b/src/go/plugins/mongodb/handler_ping.go
deleted file mode 100644
index 2ba8435bae0..00000000000
--- a/src/go/plugins/mongodb/handler_ping.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-const (
- pingFailed = 0
- pingOk = 1
-)
-
-// pingHandler executes 'ping' command and returns pingOk if a connection is alive or pingFailed otherwise.
-// https://docs.mongodb.com/manual/reference/command/ping/index.html
-func pingHandler(s Session, _ map[string]string) (interface{}, error) {
- if err := s.Ping(); err != nil {
- return pingFailed, nil
- }
-
- return pingOk, nil
-}
diff --git a/src/go/plugins/mongodb/handler_replset_config.go b/src/go/plugins/mongodb/handler_replset_config.go
deleted file mode 100644
index 6eb4c56396d..00000000000
--- a/src/go/plugins/mongodb/handler_replset_config.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// replSetConfigHandler
-// https://docs.mongodb.com/manual/reference/command/replSetGetConfig/index.html
-func replSetConfigHandler(s Session, _ map[string]string) (interface{}, error) {
- replSetGetConfig := &bson.M{}
- err := s.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "replSetGetConfig",
- Value: 1,
- },
- bson.DocElem{
- Name: "commitmentStatus",
- Value: true,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, replSetGetConfig)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(replSetGetConfig)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_replset_config_test.go b/src/go/plugins/mongodb/handler_replset_config_test.go
deleted file mode 100644
index fb9926f8b76..00000000000
--- a/src/go/plugins/mongodb/handler_replset_config_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_replSetConfigHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/replSetGetConfig.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("admin")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "replSetGetConfig" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + replSetGetConfig + \"command",
- args: args{
- s: mockSession,
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := replSetConfigHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("replSetConfigHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("replSetConfigHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_replset_status.go b/src/go/plugins/mongodb/handler_replset_status.go
deleted file mode 100644
index 889d0183a6b..00000000000
--- a/src/go/plugins/mongodb/handler_replset_status.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "strings"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-const (
- statePrimary = 1
- stateSecondary = 2
-)
-
-const nodeHealthy = 1
-
-type Member struct {
- health int
- name string
- optime int
- ptr interface{}
- state int
-}
-
-type rawMember = map[string]interface{}
-
-var errUnknownStructure = errors.New("failed to parse the members structure")
-
-func parseMembers(raw []interface{}) (result []Member, err error) {
- var (
- members []Member
- primaryNode Member
- )
-
-	for _, m := range raw {
-		// Each member must decode into a document (map) to be usable.
-		rm, ok := m.(rawMember)
-		if !ok {
-			return nil, errUnknownStructure
-		}
-
-		member := Member{}
-
-		if v, ok := rm["name"].(string); ok {
-			member.name = v
-		}
-
-		if v, ok := rm["health"].(float64); ok {
-			member.health = int(v)
-		}
-
-		if v, ok := rm["optime"].(map[string]interface{}); ok {
-			if ts, ok := v["ts"].(bson.MongoTimestamp); ok {
-				member.optime = int(ts >> 32)
-			} else {
-				member.optime = int(int64(v["ts"].(float64)) >> 32)
-			}
-		}
-
-		if v, ok := rm["state"].(int); ok {
-			member.state = v
-		}
-
- member.ptr = m
-
- if member.state == statePrimary {
- primaryNode = member
- } else {
- members = append(members, member)
- }
- }
-
- result = append([]Member{primaryNode}, members...)
- if len(result) == 0 {
- return nil, errUnknownStructure
- }
-
- return result, nil
-}
-
-func injectExtendedMembersStats(raw []interface{}) error {
- members, err := parseMembers(raw)
- if err != nil {
- return err
- }
-
- unhealthyNodes := []string{}
- unhealthyCount := 0
- primary := members[0]
-
- for _, node := range members {
- node.ptr.(rawMember)["lag"] = primary.optime - node.optime
-
- if node.state == stateSecondary && node.health != nodeHealthy {
- unhealthyNodes = append(unhealthyNodes, node.name)
- unhealthyCount++
- }
- }
-
- primary.ptr.(rawMember)["unhealthyNodes"] = unhealthyNodes
- primary.ptr.(rawMember)["unhealthyCount"] = unhealthyCount
- primary.ptr.(rawMember)["totalNodes"] = len(members) - 1
-
- return nil
-}
-
-// replSetStatusHandler
-// https://docs.mongodb.com/manual/reference/command/replSetGetStatus/index.html
-func replSetStatusHandler(s Session, _ map[string]string) (interface{}, error) {
- var replSetGetStatus map[string]interface{}
-
- err := s.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "replSetGetStatus",
- Value: 1,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, &replSetGetStatus)
-
- if err != nil {
- if strings.Contains(err.Error(), "not running with --replSet") {
- return "{}", nil
- }
-
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- err = injectExtendedMembersStats(replSetGetStatus["members"].([]interface{}))
- if err != nil {
- return nil, zbxerr.ErrorCannotParseResult.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(replSetGetStatus)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
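To make the member post-processing above easier to follow, here is a rough, self-contained sketch of the lag and unhealthy-secondary bookkeeping. The member names and optime values are invented for illustration, and the data is simplified to plain structs rather than the raw BSON maps used by the handler:

package main

import "fmt"

// member mirrors the handful of fields replSetStatusHandler extracts per node.
type member struct {
	name   string
	state  int // 1 = primary, 2 = secondary
	health int // 1 = healthy
	optime int // seconds part of the last applied operation
}

func main() {
	// Hypothetical replica set: one primary and two secondaries.
	primary := member{name: "rs0-a:27017", state: 1, health: 1, optime: 1612539531}
	secondaries := []member{
		{name: "rs0-b:27017", state: 2, health: 1, optime: 1612539529},
		{name: "rs0-c:27017", state: 2, health: 0, optime: 1612539500},
	}

	unhealthy := []string{}

	for _, node := range secondaries {
		// Replication lag in seconds, relative to the primary's optime.
		fmt.Printf("%s lag=%ds\n", node.name, primary.optime-node.optime)

		if node.health != 1 {
			unhealthy = append(unhealthy, node.name)
		}
	}

	fmt.Println("unhealthyNodes:", unhealthy, "unhealthyCount:", len(unhealthy))
}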
diff --git a/src/go/plugins/mongodb/handler_server_status.go b/src/go/plugins/mongodb/handler_server_status.go
deleted file mode 100644
index f5ee02dd4a9..00000000000
--- a/src/go/plugins/mongodb/handler_server_status.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2/bson"
-)
-
-// serverStatusHandler
-// https://docs.mongodb.com/manual/reference/command/serverStatus/#dbcmd.serverStatus
-func serverStatusHandler(s Session, _ map[string]string) (interface{}, error) {
- serverStatus := &bson.M{}
- err := s.DB("admin").Run(&bson.D{
- bson.DocElem{
- Name: "serverStatus",
- Value: 1,
- },
- bson.DocElem{
- Name: "recordStats",
- Value: 0,
- },
- bson.DocElem{
- Name: "maxTimeMS",
- Value: s.GetMaxTimeMS(),
- },
- }, serverStatus)
-
- if err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- jsonRes, err := json.Marshal(serverStatus)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonRes), nil
-}
diff --git a/src/go/plugins/mongodb/handler_server_status_test.go b/src/go/plugins/mongodb/handler_server_status_test.go
deleted file mode 100644
index 601d7743c3c..00000000000
--- a/src/go/plugins/mongodb/handler_server_status_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
- "errors"
- "io/ioutil"
- "log"
- "reflect"
- "strings"
- "testing"
-
- "gopkg.in/mgo.v2/bson"
-)
-
-func Test_serverStatusHandler(t *testing.T) {
- var testData map[string]interface{}
-
- jsonData, err := ioutil.ReadFile("testdata/serverStatus.json")
- if err != nil {
- log.Fatal(err)
- }
-
- err = json.Unmarshal(jsonData, &testData)
- if err != nil {
- log.Fatal(err)
- }
-
- mockSession := NewMockConn()
- db := mockSession.DB("admin")
- db.(*MockMongoDatabase).RunFunc = func(dbName, cmd string) ([]byte, error) {
- if cmd == "serverStatus" {
- return bson.Marshal(testData)
- }
-
- return nil, errors.New("no such cmd: " + cmd)
- }
-
- type args struct {
- s Session
- }
-
- tests := []struct {
- name string
- args args
- want interface{}
- wantErr error
- }{
- {
- name: "Must parse an output of \" + serverStatus + \"command",
- args: args{
- s: mockSession,
- },
- want: strings.TrimSpace(string(jsonData)),
- wantErr: nil,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := serverStatusHandler(tt.args.s, nil)
- if !errors.Is(err, tt.wantErr) {
- t.Errorf("serverStatusHandler() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("serverStatusHandler() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/go/plugins/mongodb/handler_shards_discovery.go b/src/go/plugins/mongodb/handler_shards_discovery.go
deleted file mode 100644
index d8cb1f1c3f0..00000000000
--- a/src/go/plugins/mongodb/handler_shards_discovery.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "encoding/json"
- "fmt"
- "net"
- "strings"
- "time"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
-)
-
-type lldShEntity struct {
- ID string `json:"{#ID}"`
- Hostname string `json:"{#HOSTNAME}"`
- MongodURI string `json:"{#MONGOD_URI}"`
- State string `json:"{#STATE}"`
-}
-
-type shEntry struct {
- ID string `bson:"_id"`
- Host string `bson:"host"`
- State json.Number `bson:"state"`
-}
-
-// shardsDiscoveryHandler
-// https://docs.mongodb.com/manual/reference/method/sh.status/#sh.status
-func shardsDiscoveryHandler(s Session, _ map[string]string) (interface{}, error) {
- var shards []shEntry
-
- if err := s.DB("config").C("shards").Find(nil).Sort(sortAsc).
- SetMaxTime(time.Duration(s.GetMaxTimeMS()) * time.Millisecond).
- All(&shards); err != nil {
- return nil, zbxerr.ErrorCannotFetchData.Wrap(err)
- }
-
- lld := make([]lldShEntity, 0)
-
- for _, sh := range shards {
- hosts := sh.Host
-
- h := strings.SplitN(sh.Host, "/", 2)
- if len(h) > 1 {
- hosts = h[1]
- }
-
- for _, hostport := range strings.Split(hosts, ",") {
- host, _, err := net.SplitHostPort(hostport)
- if err != nil {
- return nil, zbxerr.ErrorCannotParseResult.Wrap(err)
- }
-
- lld = append(lld, lldShEntity{
- ID: sh.ID,
- Hostname: host,
- MongodURI: fmt.Sprintf("%s://%s", uriDefaults.Scheme, hostport),
- State: sh.State.String(),
- })
- }
- }
-
- jsonLLD, err := json.Marshal(lld)
- if err != nil {
- return nil, zbxerr.ErrorCannotMarshalJSON.Wrap(err)
- }
-
- return string(jsonLLD), nil
-}
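The shard discovery above accepts a host field in either host:port or replSetName/host1:port,host2:port form. A small stdlib-only sketch of that splitting, using an invented shard host string:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Hypothetical value of the "host" field in config.shards.
	shardHost := "rs-shard-01/shard01-a:27017,shard01-b:27017"

	hosts := shardHost
	if h := strings.SplitN(shardHost, "/", 2); len(h) > 1 {
		hosts = h[1] // drop the replica set name prefix
	}

	for _, hostport := range strings.Split(hosts, ",") {
		host, port, err := net.SplitHostPort(hostport)
		if err != nil {
			panic(err)
		}

		fmt.Printf("{#HOSTNAME}=%s, {#MONGOD_URI}=tcp://%s:%s\n", host, host, port)
	}
}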
diff --git a/src/go/plugins/mongodb/metrics.go b/src/go/plugins/mongodb/metrics.go
deleted file mode 100644
index 9767eac9996..00000000000
--- a/src/go/plugins/mongodb/metrics.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "git.zabbix.com/ap/plugin-support/metric"
- "git.zabbix.com/ap/plugin-support/plugin"
- "git.zabbix.com/ap/plugin-support/uri"
-)
-
-// handlerFunc defines an interface that must be implemented by handlers.
-type handlerFunc func(s Session, params map[string]string) (res interface{}, err error)
-
-// getHandlerFunc returns a handlerFunc related to a given key.
-func getHandlerFunc(key string) handlerFunc {
- switch key {
- case keyConfigDiscovery:
- return configDiscoveryHandler
-
- case keyCollectionStats:
- return collectionStatsHandler
-
- case keyCollectionsDiscovery:
- return collectionsDiscoveryHandler
-
- case keyCollectionsUsage:
- return collectionsUsageHandler
-
- case keyConnPoolStats:
- return connPoolStatsHandler
-
- case keyDatabaseStats:
- return databaseStatsHandler
-
- case keyDatabasesDiscovery:
- return databasesDiscoveryHandler
-
- case keyJumboChunks:
- return jumboChunksHandler
-
- case keyOplogStats:
- return oplogStatsHandler
-
- case keyPing:
- return pingHandler
-
- case keyReplSetConfig:
- return replSetConfigHandler
-
- case keyReplSetStatus:
- return replSetStatusHandler
-
- case keyServerStatus:
- return serverStatusHandler
-
- case keyShardsDiscovery:
- return shardsDiscoveryHandler
-
- default:
- return nil
- }
-}
-
-const (
- keyConfigDiscovery = "mongodb.cfg.discovery"
- keyCollectionStats = "mongodb.collection.stats"
- keyCollectionsDiscovery = "mongodb.collections.discovery"
- keyCollectionsUsage = "mongodb.collections.usage"
- keyConnPoolStats = "mongodb.connpool.stats"
- keyDatabaseStats = "mongodb.db.stats"
- keyDatabasesDiscovery = "mongodb.db.discovery"
- keyJumboChunks = "mongodb.jumbo_chunks.count"
- keyOplogStats = "mongodb.oplog.stats"
- keyPing = "mongodb.ping"
- keyReplSetConfig = "mongodb.rs.config"
- keyReplSetStatus = "mongodb.rs.status"
- keyServerStatus = "mongodb.server.status"
- keyShardsDiscovery = "mongodb.sh.discovery"
-)
-
-var uriDefaults = &uri.Defaults{Scheme: "tcp", Port: "27017"}
-
-// Common params: [URI|Session][,User][,Password]
-var (
- paramURI = metric.NewConnParam("URI", "URI to connect or session name.").
- WithDefault(uriDefaults.Scheme + "://localhost:" + uriDefaults.Port).WithSession().
- WithValidator(uri.URIValidator{Defaults: uriDefaults, AllowedSchemes: []string{"tcp"}})
- paramUser = metric.NewConnParam("User", "MongoDB user.")
- paramPassword = metric.NewConnParam("Password", "User's password.")
- paramDatabase = metric.NewParam("Database", "Database name.").WithDefault("admin")
- paramCollection = metric.NewParam("Collection", "Collection name.").SetRequired()
-)
-
-var metrics = metric.MetricSet{
- keyConfigDiscovery: metric.New("Returns a list of discovered config servers.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyCollectionStats: metric.New("Returns a variety of storage statistics for a given collection.",
- []*metric.Param{paramURI, paramUser, paramPassword, paramDatabase, paramCollection}, false),
-
- keyCollectionsDiscovery: metric.New("Returns a list of discovered collections.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyCollectionsUsage: metric.New("Returns usage statistics for collections.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyConnPoolStats: metric.New("Returns information regarding the open outgoing connections from the "+
- "current database instance to other members of the sharded cluster or replica set.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyDatabaseStats: metric.New("Returns statistics reflecting a given database system’s state.",
- []*metric.Param{paramURI, paramUser, paramPassword, paramDatabase}, false),
-
- keyDatabasesDiscovery: metric.New("Returns a list of discovered databases.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyJumboChunks: metric.New("Returns count of jumbo chunks.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyOplogStats: metric.New("Returns a status of the replica set, using data polled from the oplog.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyPing: metric.New("Test if connection is alive or not.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyReplSetConfig: metric.New("Returns a current configuration of the replica set.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyReplSetStatus: metric.New("Returns a replica set status from the point of view of the member "+
- "where the method is run.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyServerStatus: metric.New("Returns a database’s state.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-
- keyShardsDiscovery: metric.New("Returns a list of discovered shards present in the cluster.",
- []*metric.Param{paramURI, paramUser, paramPassword}, false),
-}
-
-func init() {
- plugin.RegisterMetrics(&impl, pluginName, metrics.List()...)
-}
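All of the item keys above funnel through getHandlerFunc, so the dispatch is just a key-to-function lookup. A toy sketch of that pattern with simplified signatures (this is not the plugin's code, and the two inline handlers are stand-ins for illustration only):

package main

import "fmt"

// handler is a simplified signature; the real handlers also take a Session.
type handler func(params map[string]string) (interface{}, error)

var handlers = map[string]handler{
	"mongodb.ping": func(_ map[string]string) (interface{}, error) { return 1, nil },
	"mongodb.db.discovery": func(_ map[string]string) (interface{}, error) {
		return `[{"{#DBNAME}":"admin"}]`, nil
	},
}

func main() {
	for _, key := range []string{"mongodb.ping", "mongodb.db.discovery", "mongodb.unknown"} {
		h, ok := handlers[key]
		if !ok {
			fmt.Println(key, "-> unsupported metric")
			continue
		}

		res, _ := h(nil)
		fmt.Println(key, "->", res)
	}
}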
diff --git a/src/go/plugins/mongodb/mockconn.go b/src/go/plugins/mongodb/mockconn.go
deleted file mode 100644
index 09f9cce71c4..00000000000
--- a/src/go/plugins/mongodb/mockconn.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package mongodb
-
-import (
- "errors"
- "fmt"
- "time"
-
- "git.zabbix.com/ap/plugin-support/zbxerr"
- "gopkg.in/mgo.v2"
- "gopkg.in/mgo.v2/bson"
-)
-
-const (
- mustFail = "mustFail"
-)
-
-type MockConn struct {
- dbs map[string]*MockMongoDatabase
-}
-
-func NewMockConn() *MockConn {
- return &MockConn{
- dbs: make(map[string]*MockMongoDatabase),
- }
-}
-
-func (conn *MockConn) DB(name string) Database {
- if db, ok := conn.dbs[name]; ok {
- return db
- }
-
- conn.dbs[name] = &MockMongoDatabase{
- name: name,
- collections: make(map[string]*MockMongoCollection),
- }
-
- return conn.dbs[name]
-}
-
-func (conn *MockConn) DatabaseNames() (names []string, err error) {
- for _, db := range conn.dbs {
- if db.name == mustFail {
- return nil, zbxerr.ErrorCannotFetchData
- }
-
- names = append(names, db.name)
- }
-
- return
-}
-
-func (conn *MockConn) Ping() error {
- return nil
-}
-
-func (conn *MockConn) GetMaxTimeMS() int64 {
- return 3000
-}
-
-type MockSession interface {
- DB(name string) Database
- DatabaseNames() (names []string, err error)
- GetMaxTimeMS() int64
- Ping() error
-}
-
-type MockMongoDatabase struct {
- name string
- collections map[string]*MockMongoCollection
- RunFunc func(dbName, cmd string) ([]byte, error)
-}
-
-func (d *MockMongoDatabase) C(name string) Collection {
- if col, ok := d.collections[name]; ok {
- return col
- }
-
- d.collections[name] = &MockMongoCollection{
- name: name,
- queries: make(map[interface{}]*MockMongoQuery),
- }
-
- return d.collections[name]
-}
-
-func (d *MockMongoDatabase) CollectionNames() (names []string, err error) {
- for _, col := range d.collections {
- if col.name == mustFail {
- return nil, errors.New("fail")
- }
-
- names = append(names, col.name)
- }
-
- return
-}
-
-func (d *MockMongoDatabase) Run(cmd, result interface{}) error {
- if d.RunFunc == nil {
- d.RunFunc = func(dbName, _ string) ([]byte, error) {
- if dbName == mustFail {
- return nil, errors.New("fail")
- }
-
- return bson.Marshal(map[string]int{"ok": 1})
- }
- }
-
- if result == nil {
- return nil
- }
-
- bsonDcmd := *(cmd.(*bson.D))
- cmdName := bsonDcmd[0].Name
-
- data, err := d.RunFunc(d.name, cmdName)
- if err != nil {
- return err
- }
-
- return bson.Unmarshal(data, result)
-}
-
-type MockMongoCollection struct {
- name string
- queries map[interface{}]*MockMongoQuery
-}
-
-func (c *MockMongoCollection) Find(query interface{}) Query {
- queryHash := fmt.Sprintf("%v", query)
- if q, ok := c.queries[queryHash]; ok {
- return q
- }
-
- c.queries[queryHash] = &MockMongoQuery{
- collection: c.name,
- query: query,
- }
-
- return c.queries[queryHash]
-}
-
-type MockMongoQuery struct {
- collection string
- query interface{}
- sortFields []string
- DataFunc func(collection string, query interface{}, sortFields ...string) ([]byte, error)
-}
-
-func (q *MockMongoQuery) retrieve(result interface{}) error {
- if q.DataFunc == nil {
- return mgo.ErrNotFound
- }
-
- if result == nil {
- return nil
- }
-
- data, err := q.DataFunc(q.collection, q.query, q.sortFields...)
- if err != nil {
- return err
- }
-
- return bson.Unmarshal(data, result)
-}
-
-func (q *MockMongoQuery) All(result interface{}) error {
- return q.retrieve(result)
-}
-
-func (q *MockMongoQuery) Count() (n int, err error) {
- return 1, nil
-}
-
-func (q *MockMongoQuery) Limit(n int) Query {
- return q
-}
-
-func (q *MockMongoQuery) One(result interface{}) error {
- return q.retrieve(result)
-}
-
-func (q *MockMongoQuery) SetMaxTime(_ time.Duration) Query {
- return q
-}
-
-func (q *MockMongoQuery) Sort(fields ...string) Query {
- q.sortFields = fields
- return q
-}
diff --git a/src/go/plugins/mongodb/mongodb.go b/src/go/plugins/mongodb/mongodb.go
deleted file mode 100644
index 4b5b8284b03..00000000000
--- a/src/go/plugins/mongodb/mongodb.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-** Zabbix
-** Copyright (C) 2001-2022 Zabbix SIA
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-**/
-
-package mongodb
-
-import (
- "time"
-
- "gopkg.in/mgo.v2"
-
- "git.zabbix.com/ap/plugin-support/uri"
- "git.zabbix.com/ap/plugin-support/zbxerr"
-
- "git.zabbix.com/ap/plugin-support/plugin"
-)
-
-const pluginName = "Mongo"
-
-const hkInterval = 10
-
-// Plugin inherits plugin.Base and stores plugin-specific data.
-type Plugin struct {
- plugin.Base
- connMgr *ConnManager
- options PluginOptions
-}
-
-// impl is the pointer to the plugin implementation.
-var impl Plugin
-
-// Export implements the Exporter interface.
-func (p *Plugin) Export(key string, rawParams []string, _ plugin.ContextProvider) (result interface{}, err error) {
- params, _, err := metrics[key].EvalParams(rawParams, p.options.Sessions)
- if err != nil {
- return nil, err
- }
-
- uri, err := uri.NewWithCreds(params["URI"], params["User"], params["Password"], uriDefaults)
- if err != nil {
- return nil, err
- }
-
- handleMetric := getHandlerFunc(key)
- if handleMetric == nil {
- return nil, zbxerr.ErrorUnsupportedMetric
- }
-
- conn, err := p.connMgr.GetConnection(*uri)
- if err != nil {
-		// Special logic for processing connection errors is needed if mongodb.ping is requested,
-		// because it must return pingFailed if any error occurs.
- if key == keyPing {
- return pingFailed, nil
- }
-
- p.Errf(err.Error())
-
- return nil, err
- }
-
- result, err = handleMetric(conn, params)
- if err != nil {
- p.Errf(err.Error())
- }
-
- return result, err
-}
-
-// Start implements the Runner interface and performs initialization when plugin is activated.
-func (p *Plugin) Start() {
- p.connMgr = NewConnManager(
- time.Duration(p.options.KeepAlive)*time.Second,
- time.Duration(p.options.Timeout)*time.Second,
- hkInterval*time.Second,
- )
-}
-
-// Stop implements the Runner interface and frees resources when plugin is deactivated.
-func (p *Plugin) Stop() {
- p.connMgr.Destroy()
- p.connMgr = nil
-}
-
-type MongoLogger struct {
- Debugf func(format string, args ...interface{})
-}
-
-func (l MongoLogger) Output(_ int, msg string) error {
- l.Debugf(msg)
- return nil
-}
-
-func init() {
- logger := MongoLogger{Debugf: impl.Tracef}
-
- mgo.SetDebug(true)
- mgo.SetLogger(logger)
-}
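The comment in Export about mongodb.ping spells out one rule: a failed connection is not reported as an error for the ping item, it is simply reported as 0 (pingFailed). A toy sketch of that rule with a simplified signature, which is not the plugin's actual API:

package main

import (
	"errors"
	"fmt"
)

// export sketches the connection-error handling described in Export above:
// for the ping item a connection failure yields 0, not an error.
func export(key string, connErr error) (interface{}, error) {
	if connErr != nil {
		if key == "mongodb.ping" {
			return 0, nil // pingFailed
		}

		return nil, connErr
	}

	return "ok", nil
}

func main() {
	fmt.Println(export("mongodb.ping", errors.New("no reachable servers")))
	fmt.Println(export("mongodb.server.status", errors.New("no reachable servers")))
}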
diff --git a/src/go/plugins/mongodb/testdata/collStats.json b/src/go/plugins/mongodb/testdata/collStats.json
deleted file mode 100644
index efb07b16c9a..00000000000
--- a/src/go/plugins/mongodb/testdata/collStats.json
+++ /dev/null
@@ -1 +0,0 @@
-{"capped":false,"count":0,"indexDetails":{"_id_":{"LSM":{"bloom filter false positives":0,"bloom filter hits":0,"bloom filter misses":0,"bloom filter pages evicted from cache":0,"bloom filter pages read into cache":0,"bloom filters in the LSM tree":0,"chunks in the LSM tree":0,"highest merge generation in the LSM tree":0,"queries that could have benefited from a Bloom filter that did not exist":0,"sleep for LSM checkpoint throttle":0,"sleep for LSM merge throttle":0,"total size of bloom filters":0},"block-manager":{"allocations requiring file extension":0,"blocks allocated":0,"blocks freed":0,"checkpoint size":0,"file allocation unit size":4096,"file bytes available for reuse":0,"file magic number":120897,"file major version number":1,"file size in bytes":4096,"minor version number":0},"btree":{"btree checkpoint generation":70825,"column-store fixed-size leaf pages":0,"column-store internal pages":0,"column-store variable-size RLE encoded values":0,"column-store variable-size deleted values":0,"column-store variable-size leaf pages":0,"fixed-record size":0,"maximum internal page key size":1474,"maximum internal page size":16384,"maximum leaf page key size":1474,"maximum leaf page size":16384,"maximum leaf page value size":7372,"maximum tree depth":0,"number of key/value pairs":0,"overflow pages":0,"pages rewritten by compaction":0,"row-store internal pages":0,"row-store leaf pages":0},"cache":{"bytes currently in the cache":182,"bytes dirty in the cache cumulative":0,"bytes read into cache":0,"bytes written from cache":0,"checkpoint blocked page eviction":0,"data source pages selected for eviction unable to be evicted":0,"eviction walk passes of a file":0,"eviction walk target pages histogram - 0-9":0,"eviction walk target pages histogram - 10-31":0,"eviction walk target pages histogram - 128 and higher":0,"eviction walk target pages histogram - 32-63":0,"eviction walk target pages histogram - 64-128":0,"eviction walks abandoned":0,"eviction walks gave up because they restarted their walk twice":0,"eviction walks gave up because they saw too many pages and found no candidates":0,"eviction walks gave up because they saw too many pages and found too few candidates":0,"eviction walks reached end of tree":0,"eviction walks started from root of tree":0,"eviction walks started from saved location in tree":0,"hazard pointer blocked page eviction":0,"in-memory page passed criteria to be split":0,"in-memory page splits":0,"internal pages evicted":0,"internal pages split during eviction":0,"leaf pages split during eviction":0,"modified pages evicted":0,"overflow pages read into cache":0,"page split during eviction deepened the tree":0,"page written requiring cache overflow records":0,"pages read into cache":0,"pages read into cache after truncate":0,"pages read into cache after truncate in prepare state":0,"pages read into cache requiring cache overflow entries":0,"pages requested from the cache":0,"pages seen by eviction walk":0,"pages written from cache":0,"pages written requiring in-memory restoration":0,"tracked dirty bytes in the cache":0,"unmodified pages evicted":0},"cache_walk":{"Average difference between current eviction generation when the page was last considered":0,"Average on-disk page image size seen":0,"Average time in cache for pages that have been visited by the eviction server":0,"Average time in cache for pages that have not been visited by the eviction server":0,"Clean pages currently in cache":0,"Current eviction generation":0,"Dirty pages currently in cache":0,"Entries in the 
root page":0,"Internal pages currently in cache":0,"Leaf pages currently in cache":0,"Maximum difference between current eviction generation when the page was last considered":0,"Maximum page size seen":0,"Minimum on-disk page image size seen":0,"Number of pages never visited by eviction server":0,"On-disk page image sizes smaller than a single allocation unit":0,"Pages created in memory and never written":0,"Pages currently queued for eviction":0,"Pages that could not be queued for eviction":0,"Refs skipped during cache traversal":0,"Size of the root page":0,"Total number of pages currently in cache":0},"compression":{"compressed pages read":0,"compressed pages written":0,"page written failed to compress":0,"page written was too small to compress":0},"creationString":"access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=8,infoObj={ \"v\" : 2, \"key\" : { \"_id\" : 1 }, \"name\" : \"_id_\", \"ns\" : \"MyDatabase.MyCollection\" }),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=16k,key_format=u,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=16k,leaf_value_max=0,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=true,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u","cursor":{"bulk-loaded cursor-insert calls":0,"close calls that result in cache":0,"create calls":0,"cursor operation restarted":0,"cursor-insert key and value bytes inserted":0,"cursor-remove key bytes removed":0,"cursor-update value bytes updated":0,"cursors reused from cache":0,"insert calls":0,"modify calls":0,"next calls":0,"open cursor count":0,"prev calls":0,"remove calls":0,"reserve calls":0,"reset calls":0,"search calls":0,"search near calls":0,"truncate calls":0,"update calls":0},"metadata":{"formatVersion":8,"infoObj":"{ \"v\" : 2, \"key\" : { \"_id\" : 1 }, \"name\" : \"_id_\", \"ns\" : \"MyDatabase.MyCollection\" }"},"reconciliation":{"dictionary matches":0,"fast-path pages deleted":0,"internal page key bytes discarded using suffix compression":0,"internal page multi-block writes":0,"internal-page overflow keys":0,"leaf page key bytes discarded using prefix compression":0,"leaf page multi-block writes":0,"leaf-page overflow keys":0,"maximum blocks required for a page":0,"overflow values written":0,"page checksum matches":0,"page reconciliation calls":0,"page reconciliation calls for eviction":0,"pages deleted":0},"session":{"object compaction":0},"transaction":{"update conflicts":0},"type":"file","uri":"statistics:table:index-28--7017182801192397941"},"supplierId_hashed":{"LSM":{"bloom filter false positives":0,"bloom filter hits":0,"bloom filter misses":0,"bloom filter pages evicted from cache":0,"bloom filter pages read into cache":0,"bloom filters in the LSM tree":0,"chunks in the LSM tree":0,"highest merge generation in the LSM tree":0,"queries that could have benefited from a 
Bloom filter that did not exist":0,"sleep for LSM checkpoint throttle":0,"sleep for LSM merge throttle":0,"total size of bloom filters":0},"block-manager":{"allocations requiring file extension":0,"blocks allocated":0,"blocks freed":0,"checkpoint size":0,"file allocation unit size":4096,"file bytes available for reuse":0,"file magic number":120897,"file major version number":1,"file size in bytes":4096,"minor version number":0},"btree":{"btree checkpoint generation":70825,"column-store fixed-size leaf pages":0,"column-store internal pages":0,"column-store variable-size RLE encoded values":0,"column-store variable-size deleted values":0,"column-store variable-size leaf pages":0,"fixed-record size":0,"maximum internal page key size":1474,"maximum internal page size":16384,"maximum leaf page key size":1474,"maximum leaf page size":16384,"maximum leaf page value size":7372,"maximum tree depth":0,"number of key/value pairs":0,"overflow pages":0,"pages rewritten by compaction":0,"row-store internal pages":0,"row-store leaf pages":0},"cache":{"bytes currently in the cache":182,"bytes dirty in the cache cumulative":0,"bytes read into cache":0,"bytes written from cache":0,"checkpoint blocked page eviction":0,"data source pages selected for eviction unable to be evicted":0,"eviction walk passes of a file":0,"eviction walk target pages histogram - 0-9":0,"eviction walk target pages histogram - 10-31":0,"eviction walk target pages histogram - 128 and higher":0,"eviction walk target pages histogram - 32-63":0,"eviction walk target pages histogram - 64-128":0,"eviction walks abandoned":0,"eviction walks gave up because they restarted their walk twice":0,"eviction walks gave up because they saw too many pages and found no candidates":0,"eviction walks gave up because they saw too many pages and found too few candidates":0,"eviction walks reached end of tree":0,"eviction walks started from root of tree":0,"eviction walks started from saved location in tree":0,"hazard pointer blocked page eviction":0,"in-memory page passed criteria to be split":0,"in-memory page splits":0,"internal pages evicted":0,"internal pages split during eviction":0,"leaf pages split during eviction":0,"modified pages evicted":0,"overflow pages read into cache":0,"page split during eviction deepened the tree":0,"page written requiring cache overflow records":0,"pages read into cache":0,"pages read into cache after truncate":0,"pages read into cache after truncate in prepare state":0,"pages read into cache requiring cache overflow entries":0,"pages requested from the cache":0,"pages seen by eviction walk":0,"pages written from cache":0,"pages written requiring in-memory restoration":0,"tracked dirty bytes in the cache":0,"unmodified pages evicted":0},"cache_walk":{"Average difference between current eviction generation when the page was last considered":0,"Average on-disk page image size seen":0,"Average time in cache for pages that have been visited by the eviction server":0,"Average time in cache for pages that have not been visited by the eviction server":0,"Clean pages currently in cache":0,"Current eviction generation":0,"Dirty pages currently in cache":0,"Entries in the root page":0,"Internal pages currently in cache":0,"Leaf pages currently in cache":0,"Maximum difference between current eviction generation when the page was last considered":0,"Maximum page size seen":0,"Minimum on-disk page image size seen":0,"Number of pages never visited by eviction server":0,"On-disk page image sizes smaller than a single allocation 
unit":0,"Pages created in memory and never written":0,"Pages currently queued for eviction":0,"Pages that could not be queued for eviction":0,"Refs skipped during cache traversal":0,"Size of the root page":0,"Total number of pages currently in cache":0},"compression":{"compressed pages read":0,"compressed pages written":0,"page written failed to compress":0,"page written was too small to compress":0},"creationString":"access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=8,infoObj={ \"v\" : 2, \"key\" : { \"supplierId\" : \"hashed\" }, \"name\" : \"supplierId_hashed\", \"ns\" : \"MyDatabase.MyCollection\" }),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=16k,key_format=u,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=16k,leaf_value_max=0,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=true,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u","cursor":{"bulk-loaded cursor-insert calls":0,"close calls that result in cache":0,"create calls":0,"cursor operation restarted":0,"cursor-insert key and value bytes inserted":0,"cursor-remove key bytes removed":0,"cursor-update value bytes updated":0,"cursors reused from cache":0,"insert calls":0,"modify calls":0,"next calls":0,"open cursor count":0,"prev calls":0,"remove calls":0,"reserve calls":0,"reset calls":0,"search calls":0,"search near calls":0,"truncate calls":0,"update calls":0},"metadata":{"formatVersion":8,"infoObj":"{ \"v\" : 2, \"key\" : { \"supplierId\" : \"hashed\" }, \"name\" : \"supplierId_hashed\", \"ns\" : \"MyDatabase.MyCollection\" }"},"reconciliation":{"dictionary matches":0,"fast-path pages deleted":0,"internal page key bytes discarded using suffix compression":0,"internal page multi-block writes":0,"internal-page overflow keys":0,"leaf page key bytes discarded using prefix compression":0,"leaf page multi-block writes":0,"leaf-page overflow keys":0,"maximum blocks required for a page":0,"overflow values written":0,"page checksum matches":0,"page reconciliation calls":0,"page reconciliation calls for eviction":0,"pages deleted":0},"session":{"object compaction":0},"transaction":{"update conflicts":0},"type":"file","uri":"statistics:table:index-29--7017182801192397941"}},"indexSizes":{"_id_":4096,"supplierId_hashed":4096},"nindexes":2,"ns":"MyDatabase.MyCollection","ok":1,"size":0,"storageSize":4096,"totalIndexSize":8192,"wiredTiger":{"LSM":{"bloom filter false positives":0,"bloom filter hits":0,"bloom filter misses":0,"bloom filter pages evicted from cache":0,"bloom filter pages read into cache":0,"bloom filters in the LSM tree":0,"chunks in the LSM tree":0,"highest merge generation in the LSM tree":0,"queries that could have benefited from a Bloom filter that did not exist":0,"sleep for LSM checkpoint throttle":0,"sleep for LSM merge throttle":0,"total size of bloom 
filters":0},"block-manager":{"allocations requiring file extension":0,"blocks allocated":0,"blocks freed":0,"checkpoint size":0,"file allocation unit size":4096,"file bytes available for reuse":0,"file magic number":120897,"file major version number":1,"file size in bytes":4096,"minor version number":0},"btree":{"btree checkpoint generation":70825,"column-store fixed-size leaf pages":0,"column-store internal pages":0,"column-store variable-size RLE encoded values":0,"column-store variable-size deleted values":0,"column-store variable-size leaf pages":0,"fixed-record size":0,"maximum internal page key size":368,"maximum internal page size":4096,"maximum leaf page key size":2867,"maximum leaf page size":32768,"maximum leaf page value size":67108864,"maximum tree depth":0,"number of key/value pairs":0,"overflow pages":0,"pages rewritten by compaction":0,"row-store internal pages":0,"row-store leaf pages":0},"cache":{"bytes currently in the cache":182,"bytes dirty in the cache cumulative":0,"bytes read into cache":0,"bytes written from cache":0,"checkpoint blocked page eviction":0,"data source pages selected for eviction unable to be evicted":0,"eviction walk passes of a file":0,"eviction walk target pages histogram - 0-9":0,"eviction walk target pages histogram - 10-31":0,"eviction walk target pages histogram - 128 and higher":0,"eviction walk target pages histogram - 32-63":0,"eviction walk target pages histogram - 64-128":0,"eviction walks abandoned":0,"eviction walks gave up because they restarted their walk twice":0,"eviction walks gave up because they saw too many pages and found no candidates":0,"eviction walks gave up because they saw too many pages and found too few candidates":0,"eviction walks reached end of tree":0,"eviction walks started from root of tree":0,"eviction walks started from saved location in tree":0,"hazard pointer blocked page eviction":0,"in-memory page passed criteria to be split":0,"in-memory page splits":0,"internal pages evicted":0,"internal pages split during eviction":0,"leaf pages split during eviction":0,"modified pages evicted":0,"overflow pages read into cache":0,"page split during eviction deepened the tree":0,"page written requiring cache overflow records":0,"pages read into cache":0,"pages read into cache after truncate":0,"pages read into cache after truncate in prepare state":0,"pages read into cache requiring cache overflow entries":0,"pages requested from the cache":0,"pages seen by eviction walk":0,"pages written from cache":0,"pages written requiring in-memory restoration":0,"tracked dirty bytes in the cache":0,"unmodified pages evicted":0},"cache_walk":{"Average difference between current eviction generation when the page was last considered":0,"Average on-disk page image size seen":0,"Average time in cache for pages that have been visited by the eviction server":0,"Average time in cache for pages that have not been visited by the eviction server":0,"Clean pages currently in cache":0,"Current eviction generation":0,"Dirty pages currently in cache":0,"Entries in the root page":0,"Internal pages currently in cache":0,"Leaf pages currently in cache":0,"Maximum difference between current eviction generation when the page was last considered":0,"Maximum page size seen":0,"Minimum on-disk page image size seen":0,"Number of pages never visited by eviction server":0,"On-disk page image sizes smaller than a single allocation unit":0,"Pages created in memory and never written":0,"Pages currently queued for eviction":0,"Pages that could not be queued for 
eviction":0,"Refs skipped during cache traversal":0,"Size of the root page":0,"Total number of pages currently in cache":0},"compression":{"compressed pages read":0,"compressed pages written":0,"page written failed to compress":0,"page written was too small to compress":0},"creationString":"access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u","cursor":{"bulk-loaded cursor-insert calls":0,"close calls that result in cache":0,"create calls":0,"cursor operation restarted":0,"cursor-insert key and value bytes inserted":0,"cursor-remove key bytes removed":0,"cursor-update value bytes updated":0,"cursors reused from cache":0,"insert calls":0,"modify calls":0,"next calls":0,"open cursor count":0,"prev calls":0,"remove calls":0,"reserve calls":0,"reset calls":0,"search calls":0,"search near calls":0,"truncate calls":0,"update calls":0},"metadata":{"formatVersion":1},"reconciliation":{"dictionary matches":0,"fast-path pages deleted":0,"internal page key bytes discarded using suffix compression":0,"internal page multi-block writes":0,"internal-page overflow keys":0,"leaf page key bytes discarded using prefix compression":0,"leaf page multi-block writes":0,"leaf-page overflow keys":0,"maximum blocks required for a page":0,"overflow values written":0,"page checksum matches":0,"page reconciliation calls":0,"page reconciliation calls for eviction":0,"pages deleted":0},"session":{"object compaction":0},"transactiozn":{"update conflicts":0},"type":"file","uri":"statistics:table:collection-27--7017182801192397941"}}
diff --git a/src/go/plugins/mongodb/testdata/connPoolStats.json b/src/go/plugins/mongodb/testdata/connPoolStats.json
deleted file mode 100644
index c96fc3c69f1..00000000000
--- a/src/go/plugins/mongodb/testdata/connPoolStats.json
+++ /dev/null
@@ -1 +0,0 @@
-{"hosts":{"configsvr01:27017":{"available":2,"created":10,"inUse":0,"refreshing":0},"configsvr02:27017":{"available":2,"created":2,"inUse":0,"refreshing":0},"configsvr03:27017":{"available":2,"created":2,"inUse":0,"refreshing":0},"shard01-a:27017":{"available":2,"created":2,"inUse":0,"refreshing":0},"shard01-b:27017":{"available":2,"created":2,"inUse":0,"refreshing":0},"shard01-c:27017":{"available":2,"created":2,"inUse":0,"refreshing":0},"shard02-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard02-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard02-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0}},"lastCommittedOpTime":6926878449889968000,"numAScopedConnections":0,"numClientConnections":12,"ok":1,"operationTime":6926878449889968000,"pools":{"NetworkInterfaceTL-Replication":{"poolAvailable":2,"poolCreated":2,"poolInUse":0,"poolRefreshing":0,"shard01-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard01-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0}},"NetworkInterfaceTL-ShardRegistry":{"configsvr01:27017":{"available":1,"created":9,"inUse":0,"refreshing":0},"configsvr02:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"configsvr03:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"poolAvailable":3,"poolCreated":11,"poolInUse":0,"poolRefreshing":0},"NetworkInterfaceTL-TaskExecutorPool-0":{"poolAvailable":1,"poolCreated":1,"poolInUse":0,"poolRefreshing":0,"shard01-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0}},"global":{"configsvr01:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"configsvr02:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"configsvr03:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"poolAvailable":12,"poolCreated":12,"poolInUse":0,"poolRefreshing":0,"shard01-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard01-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard01-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard02-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard02-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard02-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-a:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-b:27017":{"available":1,"created":1,"inUse":0,"refreshing":0},"shard03-c:27017":{"available":1,"created":1,"inUse":0,"refreshing":0}}},"replicaSets":{"rs-config-server":{"hosts":[{"addr":"configsvr01:27017","hidden":false,"ismaster":true,"ok":true,"pingTimeMillis":0,"secondary":false},{"addr":"configsvr02:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true},{"addr":"configsvr03:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true}]},"rs-shard-01":{"hosts":[{"addr":"shard01-a:27017","hidden":false,"ismaster":true,"ok":true,"pingTimeMillis":0,"secondary":false},{"addr":"shard01-b:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true},{"addr":"shard01-c:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true}]},"rs-shard-02":{"hosts":[{"addr":"shard02-a:27017","hidden":false,"ismaster":true,"ok":true,"pingTimeMillis":0,"secondary":false},{"addr":"shard
02-b:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true},{"addr":"shard02-c:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true}]},"rs-shard-03":{"hosts":[{"addr":"shard03-a:27017","hidden":false,"ismaster":true,"ok":true,"pingTimeMillis":0,"secondary":false},{"addr":"shard03-b:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true},{"addr":"shard03-c:27017","hidden":false,"ismaster":false,"ok":true,"pingTimeMillis":0,"secondary":true}]}},"totalAvailable":18,"totalCreated":26,"totalInUse":0,"totalRefreshing":0}
diff --git a/src/go/plugins/mongodb/testdata/dbStats.json b/src/go/plugins/mongodb/testdata/dbStats.json
deleted file mode 100644
index 211ddd600f5..00000000000
--- a/src/go/plugins/mongodb/testdata/dbStats.json
+++ /dev/null
@@ -1 +0,0 @@
-{"avgObjSize":59,"collections":1,"dataSize":59,"db":"admin","fsTotalSize":67371577344,"fsUsedSize":8687353856,"indexSize":32768,"indexes":1,"objects":1,"ok":1,"scaleFactor":1,"storageSize":32768,"totalSize":65536,"views":0}
diff --git a/src/go/plugins/mongodb/testdata/replSetGetConfig.json b/src/go/plugins/mongodb/testdata/replSetGetConfig.json
deleted file mode 100644
index 3224800eea4..00000000000
--- a/src/go/plugins/mongodb/testdata/replSetGetConfig.json
+++ /dev/null
@@ -1 +0,0 @@
-{"config":{"_id":"rs-shard-01","members":[{"_id":0,"arbiterOnly":false,"buildIndexes":true,"hidden":false,"host":"shard01-a:27017","priority":1,"slaveDelay":0,"tags":{},"votes":1},{"_id":1,"arbiterOnly":false,"buildIndexes":true,"hidden":false,"host":"shard01-b:27017","priority":1,"slaveDelay":0,"tags":{},"votes":1},{"_id":2,"arbiterOnly":false,"buildIndexes":true,"hidden":false,"host":"shard01-c:27017","priority":1,"slaveDelay":0,"tags":{},"votes":1}],"protocolVersion":1,"settings":{"catchUpTakeoverDelayMillis":30000,"catchUpTimeoutMillis":-1,"chainingAllowed":true,"electionTimeoutMillis":10000,"getLastErrorDefaults":{"w":1,"wtimeout":0},"getLastErrorModes":{},"heartbeatIntervalMillis":2000,"heartbeatTimeoutSecs":10,"replicaSetId":"5fe0628084064df4684b5e4d"},"version":1,"writeConcernMajorityJournalDefault":true}}
diff --git a/src/go/plugins/mongodb/testdata/serverStatus.json b/src/go/plugins/mongodb/testdata/serverStatus.json
deleted file mode 100644
index 5a22d85f2eb..00000000000
--- a/src/go/plugins/mongodb/testdata/serverStatus.json
+++ /dev/null
@@ -1 +0,0 @@
-{"connections":{"active":1,"available":838859,"awaitingTopologyChanges":0,"current":1,"exhaustHello":0,"exhaustIsMaster":0,"totalCreated":8},"electionMetrics":{"averageCatchUpOps":0,"catchUpTakeover":{"called":0,"successful":0},"electionTimeout":{"called":0,"successful":0},"freezeTimeout":{"called":0,"successful":0},"numCatchUps":0,"numCatchUpsAlreadyCaughtUp":0,"numCatchUpsFailedWithError":0,"numCatchUpsFailedWithNewTerm":0,"numCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd":0,"numCatchUpsSkipped":0,"numCatchUpsSucceeded":0,"numCatchUpsTimedOut":0,"numStepDownsCausedByHigherTerm":0,"priorityTakeover":{"called":0,"successful":0},"stepUpCmd":{"called":0,"successful":0}},"extra_info":{"input_blocks":0,"involuntary_context_switches":8133,"maximum_resident_set_kb":97780,"note":"fields vary by platform","output_blocks":22096,"page_faults":0,"page_reclaims":17360,"system_time_us":18191069,"user_time_us":17360886,"voluntary_context_switches":152109},"flowControl":{"enabled":true,"isLagged":false,"isLaggedCount":0,"isLaggedTimeMicros":0,"locksPerKiloOp":0,"sustainerRate":0,"targetRateLimit":1000000000,"timeAcquiringMicros":4111},"freeMonitoring":{"state":"undecided"},"globalLock":{"activeClients":{"readers":0,"total":0,"writers":0},"currentQueue":{"readers":0,"total":0,"writers":0},"totalTime":4307530000},"host":"1f00645c8ab3","localTime":"2021-02-08T14:45:24.784+02:00","locks":{"Collection":{"acquireCount":{"W":2,"r":151,"w":71}},"Database":{"acquireCount":{"W":4,"r":126,"w":71}},"Global":{"acquireCount":{"W":4,"r":13049,"w":75}},"Mutex":{"acquireCount":{"r":200}},"ParallelBatchWriterMode":{"acquireCount":{"r":159}},"ReplicationStateTransition":{"acquireCount":{"w":13128}}},"logicalSessionRecordCache":{"activeSessionsCount":0,"lastSessionsCollectionJobCursorsClosed":0,"lastSessionsCollectionJobDurationMillis":10,"lastSessionsCollectionJobEntriesEnded":0,"lastSessionsCollectionJobEntriesRefreshed":0,"lastSessionsCollectionJobTimestamp":"2021-02-08T14:43:38.435+02:00","lastTransactionReaperJobDurationMillis":11,"lastTransactionReaperJobEntriesCleanedUp":0,"lastTransactionReaperJobTimestamp":"2021-02-08T14:43:38.435+02:00","sessionCatalogSize":0,"sessionsCollectionJobCount":15,"transactionReaperJobCount":15},"mem":{"bits":64,"resident":93,"supported":true,"virtual":1550},"metrics":{"aggStageCounters":{"$_internalInhibitOptimization":0,"$_internalSplitPipeline":0,"$addFields":0,"$bucket":0,"$bucketAuto":0,"$changeStream":0,"$collStats":0,"$count":0,"$currentOp":0,"$facet":0,"$geoNear":0,"$graphLookup":0,"$group":0,"$indexStats":0,"$limit":0,"$listLocalSessions":0,"$listSessions":0,"$lookup":0,"$match":0,"$merge":0,"$mergeCursors":0,"$out":0,"$planCacheStats":0,"$project":0,"$redact":0,"$replaceRoot":0,"$replaceWith":0,"$sample":0,"$set":0,"$skip":0,"$sort":0,"$sortByCount":0,"$unionWith":0,"$unset":0,"$unwind":0},"commands":{"\u003cUNKNOWN\u003e":0,"_addShard":{"failed":0,"total":0},"_cloneCollectionOptionsFromPrimaryShard":{"failed":0,"total":0},"_configsvrAddShard":{"failed":0,"total":0},"_configsvrAddShardToZone":{"failed":0,"total":0},"_configsvrBalancerCollectionStatus":{"failed":0,"total":0},"_configsvrBalancerStart":{"failed":0,"total":0},"_configsvrBalancerStatus":{"failed":0,"total":0},"_configsvrBalancerStop":{"failed":0,"total":0},"_configsvrClearJumboFlag":{"failed":0,"total":0},"_configsvrCommitChunkMerge":{"failed":0,"total":0},"_configsvrCommitChunkMigration":{"failed":0,"total":0},"_configsvrCommitChunkSplit":{"failed":0,"total":0},"_configsvrCommitMovePrimary":{"failed":0,"total":0}
,"_configsvrCreateCollection":{"failed":0,"total":0},"_configsvrCreateDatabase":{"failed":0,"total":0},"_configsvrDropCollection":{"failed":0,"total":0},"_configsvrDropDatabase":{"failed":0,"total":0},"_configsvrEnableSharding":{"failed":0,"total":0},"_configsvrEnsureChunkVersionIsGreaterThan":{"failed":0,"total":0},"_configsvrMoveChunk":{"failed":0,"total":0},"_configsvrMovePrimary":{"failed":0,"total":0},"_configsvrRefineCollectionShardKey":{"failed":0,"total":0},"_configsvrRemoveShard":{"failed":0,"total":0},"_configsvrRemoveShardFromZone":{"failed":0,"total":0},"_configsvrShardCollection":{"failed":0,"total":0},"_configsvrUpdateZoneKeyRange":{"failed":0,"total":0},"_flushDatabaseCacheUpdates":{"failed":0,"total":0},"_flushRoutingTableCacheUpdates":{"failed":0,"total":0},"_getNextSessionMods":{"failed":0,"total":0},"_getUserCacheGeneration":{"failed":0,"total":0},"_isSelf":{"failed":0,"total":0},"_killOperations":{"failed":0,"total":0},"_mergeAuthzCollections":{"failed":0,"total":0},"_migrateClone":{"failed":0,"total":0},"_recvChunkAbort":{"failed":0,"total":0},"_recvChunkCommit":{"failed":0,"total":0},"_recvChunkStart":{"failed":0,"total":0},"_recvChunkStatus":{"failed":0,"total":0},"_shardsvrCloneCatalogData":{"failed":0,"total":0},"_shardsvrMovePrimary":{"failed":0,"total":0},"_shardsvrShardCollection":{"failed":0,"total":0},"_transferMods":{"failed":0,"total":0},"abortTransaction":{"failed":0,"total":0},"aggregate":{"failed":0,"total":0},"appendOplogNote":{"failed":0,"total":0},"applyOps":{"failed":0,"total":0},"authenticate":{"failed":0,"total":0},"availableQueryOptions":{"failed":0,"total":0},"buildInfo":{"failed":0,"total":0},"checkShardingIndex":{"failed":0,"total":0},"cleanupOrphaned":{"failed":0,"total":0},"cloneCollectionAsCapped":{"failed":0,"total":0},"collMod":{"failed":0,"total":0},"collStats":{"failed":0,"total":0},"commitTransaction":{"failed":0,"total":0},"compact":{"failed":0,"total":0},"connPoolStats":{"failed":0,"total":0},"connPoolSync":{"failed":0,"total":0},"connectionStatus":{"failed":0,"total":0},"convertToCapped":{"failed":0,"total":0},"coordinateCommitTransaction":{"failed":0,"total":0},"count":{"failed":0,"total":0},"create":{"failed":0,"total":0},"createIndexes":{"failed":0,"total":0},"createRole":{"failed":0,"total":0},"createUser":{"failed":0,"total":0},"currentOp":{"failed":0,"total":0},"dataSize":{"failed":0,"total":0},"dbHash":{"failed":0,"total":0},"dbStats":{"failed":0,"total":0},"delete":{"failed":0,"total":0},"distinct":{"failed":0,"total":0},"driverOIDTest":{"failed":0,"total":0},"drop":{"failed":0,"total":0},"dropAllRolesFromDatabase":{"failed":0,"total":0},"dropAllUsersFromDatabase":{"failed":0,"total":0},"dropConnections":{"failed":0,"total":0},"dropDatabase":{"failed":0,"total":0},"dropIndexes":{"failed":0,"total":0},"dropRole":{"failed":0,"total":0},"dropUser":{"failed":0,"total":0},"endSessions":{"failed":0,"total":0},"explain":{"failed":0,"total":0},"features":{"failed":0,"total":0},"filemd5":{"failed":0,"total":0},"find":{"failed":0,"total":16},"findAndModify":{"arrayFilters":0,"failed":0,"pipeline":0,"total":0},"flushRouterConfig":{"failed":0,"total":0},"fsync":{"failed":0,"total":0},"fsyncUnlock":{"failed":0,"total":0},"geoSearch":{"failed":0,"total":0},"getCmdLineOpts":{"failed":0,"total":0},"getDatabaseVersion":{"failed":0,"total":0},"getDefaultRWConcern":{"failed":0,"total":0},"getDiagnosticData":{"failed":0,"total":0},"getFreeMonitoringStatus":{"failed":0,"total":0},"getLastError":{"failed":0,"total":0},"getLog":{"failed":0,"total":0}
,"getMore":{"failed":0,"total":0},"getParameter":{"failed":0,"total":0},"getShardMap":{"failed":0,"total":0},"getShardVersion":{"failed":0,"total":0},"getnonce":{"failed":0,"total":8},"grantPrivilegesToRole":{"failed":0,"total":0},"grantRolesToRole":{"failed":0,"total":0},"grantRolesToUser":{"failed":0,"total":0},"hello":{"failed":0,"total":0},"hostInfo":{"failed":0,"total":0},"insert":{"failed":0,"total":0},"internalRenameIfOptionsAndIndexesMatch":{"failed":0,"total":0},"invalidateUserCache":{"failed":0,"total":0},"isMaster":{"failed":0,"total":17},"killAllSessions":{"failed":0,"total":0},"killAllSessionsByPattern":{"failed":0,"total":0},"killCursors":{"failed":0,"total":0},"killOp":{"failed":0,"total":0},"killSessions":{"failed":0,"total":0},"listCollections":{"failed":0,"total":0},"listCommands":{"failed":0,"total":0},"listDatabases":{"failed":0,"total":0},"listIndexes":{"failed":0,"total":30},"lockInfo":{"failed":0,"total":0},"logRotate":{"failed":0,"total":0},"logout":{"failed":0,"total":0},"mapReduce":{"failed":0,"total":0},"mapreduce":{"shardedfinish":{"failed":0,"total":0}},"mergeChunks":{"failed":0,"total":0},"moveChunk":{"failed":0,"total":0},"ping":{"failed":0,"total":39},"planCacheClear":{"failed":0,"total":0},"planCacheClearFilters":{"failed":0,"total":0},"planCacheListFilters":{"failed":0,"total":0},"planCacheSetFilter":{"failed":0,"total":0},"prepareTransaction":{"failed":0,"total":0},"profile":{"failed":0,"total":0},"reIndex":{"failed":0,"total":0},"refreshSessions":{"failed":0,"total":0},"renameCollection":{"failed":0,"total":0},"repairDatabase":{"failed":0,"total":0},"replSetAbortPrimaryCatchUp":{"failed":0,"total":0},"replSetFreeze":{"failed":0,"total":0},"replSetGetConfig":{"failed":0,"total":0},"replSetGetRBID":{"failed":0,"total":0},"replSetGetStatus":{"failed":0,"total":0},"replSetHeartbeat":{"failed":0,"total":0},"replSetInitiate":{"failed":0,"total":0},"replSetMaintenance":{"failed":0,"total":0},"replSetReconfig":{"failed":0,"total":0},"replSetRequestVotes":{"failed":0,"total":0},"replSetResizeOplog":{"failed":0,"total":0},"replSetStepDown":{"failed":0,"total":0},"replSetStepDownWithForce":{"failed":0,"total":0},"replSetStepUp":{"failed":0,"total":0},"replSetSyncFrom":{"failed":0,"total":0},"replSetUpdatePosition":{"failed":0,"total":0},"resetError":{"failed":0,"total":0},"revokePrivilegesFromRole":{"failed":0,"total":0},"revokeRolesFromRole":{"failed":0,"total":0},"revokeRolesFromUser":{"failed":0,"total":0},"rolesInfo":{"failed":0,"total":0},"saslContinue":{"failed":0,"total":0},"saslStart":{"failed":0,"total":0},"serverStatus":{"failed":0,"total":7},"setDefaultRWConcern":{"failed":0,"total":0},"setFeatureCompatibilityVersion":{"failed":0,"total":0},"setFreeMonitoring":{"failed":0,"total":0},"setIndexCommitQuorum":{"failed":0,"total":0},"setParameter":{"failed":0,"total":0},"setShardVersion":{"failed":0,"total":0},"shardConnPoolStats":{"failed":0,"total":0},"shardingState":{"failed":0,"total":0},"shutdown":{"failed":0,"total":0},"splitChunk":{"failed":0,"total":0},"splitVector":{"failed":0,"total":0},"startRecordingTraffic":{"failed":0,"total":0},"startSession":{"failed":0,"total":0},"stopRecordingTraffic":{"failed":0,"total":0},"top":{"failed":0,"total":0},"unsetSharding":{"failed":0,"total":0},"update":{"arrayFilters":0,"failed":0,"pipeline":0,"total":0},"updateRole":{"failed":0,"total":0},"updateUser":{"failed":0,"total":0},"usersInfo":{"failed":0,"total":0},"validate":{"failed":0,"total":0},"voteCommitIndexBuild":{"failed":0,"total":0},"waitForFailPoint":{"fai
led":0,"total":0},"whatsmyuri":{"failed":0,"total":0}},"cursor":{"open":{"noTimeout":0,"pinned":0,"total":0},"timedOut":0},"document":{"deleted":0,"inserted":0,"returned":0,"updated":0},"getLastError":{"default":{"unsatisfiable":0,"wtimeouts":0},"wtime":{"num":0,"totalMillis":0},"wtimeouts":0},"operation":{"scanAndOrder":0,"writeConflicts":0},"query":{"planCacheTotalSizeEstimateBytes":0,"updateOneOpStyleBroadcastWithExactIDCount":0},"queryExecutor":{"collectionScans":{"nonTailable":0,"total":0},"scanned":0,"scannedObjects":0},"record":{"moves":0},"repl":{"apply":{"attemptsToBecomeSecondary":0,"batchSize":0,"batches":{"num":0,"totalMillis":0},"ops":0},"buffer":{"count":0,"maxSizeBytes":0,"sizeBytes":0},"executor":{"networkInterface":"DEPRECATED: getDiagnosticString is deprecated in NetworkInterfaceTL","pool":{"inProgressCount":0},"queues":{"networkInProgress":0,"sleepers":0},"shuttingDown":false,"unsignaledEvents":0},"initialSync":{"completed":0,"failedAttempts":0,"failures":0},"network":{"bytes":0,"getmores":{"num":0,"numEmptyBatches":0,"totalMillis":0},"notMasterLegacyUnacknowledgedWrites":0,"notMasterUnacknowledgedWrites":0,"oplogGetMoresProcessed":{"num":0,"totalMillis":0},"ops":0,"readersCreated":0,"replSetUpdatePosition":{"num":0}},"stateTransition":{"lastStateTransition":"","userOperationsKilled":0,"userOperationsRunning":0},"syncSource":{"numSelections":0,"numTimesChoseDifferent":0,"numTimesChoseSame":0,"numTimesCouldNotFind":0}},"ttl":{"deletedDocuments":0,"passes":71}},"network":{"bytesIn":4394,"bytesOut":261528,"compression":{"snappy":{"compressor":{"bytesIn":0,"bytesOut":0},"decompressor":{"bytesIn":0,"bytesOut":0}},"zlib":{"compressor":{"bytesIn":0,"bytesOut":0},"decompressor":{"bytesIn":0,"bytesOut":0}},"zstd":{"compressor":{"bytesIn":0,"bytesOut":0},"decompressor":{"bytesIn":0,"bytesOut":0}}},"numRequests":71,"numSlowDNSOperations":0,"numSlowSSLOperations":0,"physicalBytesIn":4394,"physicalBytesOut":261528,"serviceExecutorTaskStats":{"executor":"passthrough","threadsRunning":1},"tcpFastOpen":{"accepted":0,"clientSupported":true,"kernelSetting":1,"serverSupported":true}},"ok":1,"opLatencies":{"commands":{"latency":8911,"ops":70},"reads":{"latency":0,"ops":0},"transactions":{"latency":0,"ops":0},"writes":{"latency":0,"ops":0}},"opReadConcernCounters":{"available":0,"linearizable":0,"local":0,"majority":0,"none":16,"snapshot":0},"opcounters":{"command":101,"delete":0,"getmore":0,"insert":0,"query":16,"update":0},"opcountersRepl":{"command":0,"delete":0,"getmore":0,"insert":0,"query":0,"update":0},"pid":1,"process":"mongod","security":{"authentication":{"mechanisms":{"MONGODB-X509":{"authenticate":{"received":0,"successful":0},"speculativeAuthenticate":{"received":0,"successful":0}},"SCRAM-SHA-1":{"authenticate":{"received":0,"successful":0},"speculativeAuthenticate":{"received":0,"successful":0}},"SCRAM-SHA-256":{"authenticate":{"received":0,"successful":0},"speculativeAuthenticate":{"received":0,"successful":0}}}}},"storageEngine":{"backupCursorOpen":false,"dropPendingIdents":0,"name":"wiredTiger","oldestRequiredTimestampForCrashRecovery":0,"persistent":true,"readOnly":false,"supportsCommittedReads":true,"supportsPendingDrops":true,"supportsSnapshotReadConcern":true,"supportsTwoPhaseIndexBuild":true},"tcmalloc":{"generic":{"current_allocated_bytes":84765480,"heap_size":89341952},"tcmalloc":{"aggressive_memory_decommit":0,"central_cache_free_bytes":224512,"current_total_thread_cache_bytes":729944,"formattedString":"------------------------------------------------\nMALLOC: 
84766056 ( 80.8 MiB) Bytes in use by application\nMALLOC: + 3330048 ( 3.2 MiB) Bytes in page heap freelist\nMALLOC: + 224512 ( 0.2 MiB) Bytes in central cache freelist\nMALLOC: + 291968 ( 0.3 MiB) Bytes in transfer cache freelist\nMALLOC: + 729368 ( 0.7 MiB) Bytes in thread cache freelists\nMALLOC: + 2752512 ( 2.6 MiB) Bytes in malloc metadata\nMALLOC: ------------\nMALLOC: = 92094464 ( 87.8 MiB) Actual memory used (physical + swap)\nMALLOC: + 0 ( 0.0 MiB) Bytes released to OS (aka unmapped)\nMALLOC: ------------\nMALLOC: = 92094464 ( 87.8 MiB) Virtual address space used\nMALLOC:\nMALLOC: 674 Spans in use\nMALLOC: 31 Thread heaps in use\nMALLOC: 4096 Tcmalloc page size\n------------------------------------------------\nCall ReleaseFreeMemory() to release freelist memory to the OS (via madvise()).\nBytes released to the OS take up virtual address space but no physical memory.\n","max_total_thread_cache_bytes":260046848,"pageheap_commit_count":62,"pageheap_committed_bytes":89341952,"pageheap_decommit_count":1,"pageheap_free_bytes":3330048,"pageheap_reserve_count":47,"pageheap_scavenge_count":1,"pageheap_total_commit_bytes":92364800,"pageheap_total_decommit_bytes":3022848,"pageheap_total_reserve_bytes":89341952,"pageheap_unmapped_bytes":0,"release_rate":1,"spinlock_total_delay_ns":0,"thread_cache_free_bytes":729944,"total_free_bytes":1246424,"transfer_cache_free_bytes":291968}},"trafficRecording":{"running":false},"transactions":{"currentActive":0,"currentInactive":0,"currentOpen":0,"currentPrepared":0,"retriedCommandsCount":0,"retriedStatementsCount":0,"totalAborted":0,"totalCommitted":0,"totalPrepared":0,"totalPreparedThenAborted":0,"totalPreparedThenCommitted":0,"totalStarted":0,"transactionsCollectionWriteCount":0},"transportSecurity":{"1.0":0,"1.1":0,"1.2":0,"1.3":0,"unknown":0},"twoPhaseCommitCoordinator":{"currentInSteps":{"deletingCoordinatorDoc":0,"waitingForDecisionAcks":0,"waitingForVotes":0,"writingDecision":0,"writingParticipantList":0},"totalAbortedTwoPhaseCommit":0,"totalCommittedTwoPhaseCommit":0,"totalCreated":0,"totalStartedTwoPhaseCommit":0},"uptime":4307,"uptimeEstimate":4307,"uptimeMillis":4307533,"version":"4.4.2","wiredTiger":{"block-manager":{"blocks pre-loaded":7,"blocks read":106,"blocks written":393,"bytes read":454656,"bytes read via memory map API":0,"bytes read via system call API":0,"bytes written":2793472,"bytes written for checkpoint":2793472,"bytes written via memory map API":0,"bytes written via system call API":0,"mapped blocks read":0,"mapped bytes read":0,"number of times the file was remapped because it changed size via fallocate or truncate":0,"number of times the region was remapped via write":0},"cache":{"application threads page read from disk to cache count":6,"application threads page read from disk to cache time (usecs)":63,"application threads page write from cache to disk count":148,"application threads page write from cache to disk time (usecs)":9767,"bytes allocated for updates":23075,"bytes belonging to page images in the cache":76829,"bytes belonging to the history store table in the cache":847,"bytes currently in the cache":106686,"bytes dirty in the cache cumulative":2938265,"bytes not belonging to page images in the cache":29857,"bytes read into cache":71138,"bytes written from cache":1275601,"cache overflow score":0,"checkpoint blocked page eviction":0,"eviction calls to get a page":376,"eviction calls to get a page found queue empty":302,"eviction calls to get a page found queue empty after locking":2,"eviction currently operating in 
aggressive mode":0,"eviction empty score":0,"eviction passes of a file":0,"eviction server candidate queue empty when topping up":0,"eviction server candidate queue not empty when topping up":0,"eviction server evicting pages":0,"eviction server slept, because we did not make progress with eviction":71,"eviction server unable to reach eviction goal":0,"eviction server waiting for a leaf page":3,"eviction state":64,"eviction walk target pages histogram - 0-9":0,"eviction walk target pages histogram - 10-31":0,"eviction walk target pages histogram - 128 and higher":0,"eviction walk target pages histogram - 32-63":0,"eviction walk target pages histogram - 64-128":0,"eviction walk target strategy both clean and dirty pages":0,"eviction walk target strategy only clean pages":0,"eviction walk target strategy only dirty pages":0,"eviction walks abandoned":0,"eviction walks gave up because they restarted their walk twice":0,"eviction walks gave up because they saw too many pages and found no candidates":0,"eviction walks gave up because they saw too many pages and found too few candidates":0,"eviction walks reached end of tree":0,"eviction walks restarted":0,"eviction walks started from root of tree":0,"eviction walks started from saved location in tree":0,"eviction worker thread active":4,"eviction worker thread created":0,"eviction worker thread evicting pages":71,"eviction worker thread removed":0,"eviction worker thread stable number":0,"files with active eviction walks":0,"files with new eviction walks started":0,"force re-tuning of eviction workers once in a while":0,"forced eviction - history store pages failed to evict while session has history store cursor open":0,"forced eviction - history store pages selected while session has history store cursor open":0,"forced eviction - history store pages successfully evicted while session has history store cursor open":0,"forced eviction - pages evicted that were clean count":0,"forced eviction - pages evicted that were clean time (usecs)":0,"forced eviction - pages evicted that were dirty count":0,"forced eviction - pages evicted that were dirty time (usecs)":0,"forced eviction - pages selected because of too many deleted items count":0,"forced eviction - pages selected count":0,"forced eviction - pages selected unable to be evicted count":0,"forced eviction - pages selected unable to be evicted time":0,"forced eviction - session returned rollback error while force evicting due to being oldest":0,"hazard pointer blocked page eviction":0,"hazard pointer check calls":71,"hazard pointer check entries walked":2,"hazard pointer maximum array length":2,"history store score":0,"history store table insert calls":0,"history store table insert calls that returned restart":0,"history store table max on-disk size":0,"history store table on-disk size":0,"history store table out-of-order resolved updates that lose their durable timestamp":0,"history store table out-of-order updates that were fixed up by moving existing records":0,"history store table out-of-order updates that were fixed up during insertion":0,"history store table reads":0,"history store table reads missed":0,"history store table reads requiring squashed modifies":0,"history store table truncation by rollback to stable to remove an unstable update":0,"history store table truncation by rollback to stable to remove an update":0,"history store table truncation to remove an update":0,"history store table truncation to remove range of updates due to key being removed from the data page during 
reconciliation":0,"history store table truncation to remove range of updates due to mixed timestamps":0,"history store table writes requiring squashed modifies":0,"in-memory page passed criteria to be split":0,"in-memory page splits":0,"internal pages evicted":0,"internal pages queued for eviction":0,"internal pages seen by eviction walk":0,"internal pages seen by eviction walk that are already queued":0,"internal pages split during eviction":0,"leaf pages split during eviction":0,"maximum bytes configured":505413632,"maximum page size at eviction":352,"modified pages evicted":71,"modified pages evicted by application threads":0,"operations timed out waiting for space in cache":0,"overflow pages read into cache":0,"page split during eviction deepened the tree":0,"page written requiring history store records":0,"pages currently held in the cache":19,"pages evicted by application threads":0,"pages evicted in parallel with checkpoint":71,"pages queued for eviction":0,"pages queued for eviction post lru sorting":0,"pages queued for urgent eviction":71,"pages queued for urgent eviction during walk":0,"pages read into cache":14,"pages read into cache after truncate":72,"pages read into cache after truncate in prepare state":0,"pages requested from the cache":1968,"pages seen by eviction walk":0,"pages seen by eviction walk that are already queued":0,"pages selected for eviction unable to be evicted":0,"pages selected for eviction unable to be evicted as the parent page has overflow items":0,"pages selected for eviction unable to be evicted because of active children on an internal page":0,"pages selected for eviction unable to be evicted because of failure in reconciliation":0,"pages walked for eviction":0,"pages written from cache":156,"pages written requiring in-memory restoration":0,"percentage overhead":8,"tracked bytes belonging to internal pages in the cache":5470,"tracked bytes belonging to leaf pages in the cache":101216,"tracked dirty bytes in the cache":467,"tracked dirty pages in the cache":1,"unmodified pages evicted":0},"capacity":{"background fsync file handles considered":0,"background fsync file handles synced":0,"background fsync time (msecs)":0,"bytes read":77824,"bytes written for checkpoint":1223343,"bytes written for eviction":0,"bytes written for log":1258339200,"bytes written total":1259562543,"threshold to call fsync":0,"time waiting due to total capacity (usecs)":0,"time waiting during checkpoint (usecs)":0,"time waiting during eviction (usecs)":0,"time waiting during logging (usecs)":0,"time waiting during read (usecs)":0},"checkpoint-cleanup":{"pages added for eviction":71,"pages removed":0,"pages skipped during tree walk":0,"pages visited":145},"concurrentTransactions":{"read":{"available":127,"out":1,"totalTickets":128},"write":{"available":128,"out":0,"totalTickets":128}},"connection":{"auto adjusting condition resets":378,"auto adjusting condition wait calls":26641,"auto adjusting condition wait raced to update timeout and skipped updating":0,"detected system time went backwards":0,"files currently open":14,"hash bucket array size for data handles":512,"hash bucket array size general":512,"memory allocations":173468,"memory frees":172614,"memory re-allocations":16623,"pthread mutex condition wait calls":69351,"pthread mutex shared lock read-lock calls":84645,"pthread mutex shared lock write-lock calls":4920,"total fsync I/Os":461,"total read I/Os":1439,"total write I/Os":550},"cursor":{"Total number of entries skipped by cursor next calls":0,"Total number of 
entries skipped by cursor prev calls":0,"Total number of entries skipped to position the history store cursor":0,"cached cursor count":15,"cursor bulk loaded cursor insert calls":0,"cursor close calls that result in cache":35885,"cursor create calls":70,"cursor insert calls":159,"cursor insert key and value bytes":102523,"cursor modify calls":0,"cursor modify key and value bytes affected":0,"cursor modify value bytes modified":0,"cursor next calls":196,"cursor next calls that skip due to a globally visible history store tombstone":0,"cursor next calls that skip due to a globally visible history store tombstone in rollback to stable":0,"cursor next calls that skip greater than or equal to 100 entries":0,"cursor next calls that skip less than 100 entries":194,"cursor operation restarted":0,"cursor prev calls":81,"cursor prev calls that skip due to a globally visible history store tombstone":0,"cursor prev calls that skip due to a globally visible history store tombstone in rollback to stable":0,"cursor prev calls that skip greater than or equal to 100 entries":0,"cursor prev calls that skip less than 100 entries":81,"cursor remove calls":0,"cursor remove key bytes removed":0,"cursor reserve calls":0,"cursor reset calls":37595,"cursor search calls":839,"cursor search history store calls":0,"cursor search near calls":86,"cursor sweep buckets":6156,"cursor sweep cursors closed":0,"cursor sweep cursors examined":5,"cursor sweeps":1026,"cursor truncate calls":0,"cursor update calls":0,"cursor update key and value bytes":0,"cursor update value size change":0,"cursors reused from cache":35863,"open cursor count":7},"data-handle":{"connection data handle size":432,"connection data handles currently active":19,"connection sweep candidate became referenced":0,"connection sweep dhandles closed":0,"connection sweep dhandles removed from hash list":84,"connection sweep time-of-death sets":681,"connection sweeps":426,"session dhandles swept":139,"session sweep attempts":102},"lock":{"checkpoint lock acquisitions":72,"checkpoint lock application thread wait time (usecs)":0,"checkpoint lock internal thread wait time (usecs)":0,"dhandle lock application thread time waiting (usecs)":0,"dhandle lock internal thread time waiting (usecs)":0,"dhandle read lock acquisitions":17668,"dhandle write lock acquisitions":187,"durable timestamp queue lock application thread time waiting (usecs)":0,"durable timestamp queue lock internal thread time waiting (usecs)":0,"durable timestamp queue read lock acquisitions":0,"durable timestamp queue write lock acquisitions":0,"metadata lock acquisitions":72,"metadata lock application thread wait time (usecs)":100,"metadata lock internal thread wait time (usecs)":0,"read timestamp queue lock application thread time waiting (usecs)":0,"read timestamp queue lock internal thread time waiting (usecs)":0,"read timestamp queue read lock acquisitions":0,"read timestamp queue write lock acquisitions":0,"schema lock acquisitions":85,"schema lock application thread wait time (usecs)":0,"schema lock internal thread wait time (usecs)":0,"table lock application thread time waiting for the table lock (usecs)":0,"table lock internal thread time waiting for the table lock (usecs)":0,"table read lock acquisitions":0,"table write lock acquisitions":9,"txn global lock application thread time waiting (usecs)":0,"txn global lock internal thread time waiting (usecs)":0,"txn global read lock acquisitions":336,"txn global write lock acquisitions":218},"log":{"busy returns attempting to switch 
slots":0,"force archive time sleeping (usecs)":0,"log bytes of payload data":80873,"log bytes written":112512,"log files manually zero-filled":0,"log flush operations":41972,"log force write operations":46637,"log force write operations skipped":46629,"log records compressed":72,"log records not compressed":0,"log records too small to compress":290,"log release advances write LSN":73,"log scan operations":6,"log scan records requiring two reads":0,"log server thread advances write LSN":8,"log server thread write LSN walk skipped":5236,"log sync operations":81,"log sync time duration (usecs)":150305,"log sync_dir operations":1,"log sync_dir time duration (usecs)":4788,"log write operations":362,"logging bytes consolidated":112000,"maximum log file size":104857600,"number of pre-allocated log files to create":2,"pre-allocated log files not ready and missed":1,"pre-allocated log files prepared":2,"pre-allocated log files used":0,"records processed by log scan":15,"slot close lost race":0,"slot close unbuffered waits":0,"slot closures":81,"slot join atomic update races":0,"slot join calls atomic updates raced":0,"slot join calls did not yield":362,"slot join calls found active slot closed":0,"slot join calls slept":0,"slot join calls yielded":0,"slot join found active slot closed":0,"slot joins yield time (usecs)":0,"slot transitions unable to find free slot":0,"slot unbuffered writes":0,"total in-memory size of compressed records":92785,"total log buffer size":33554432,"total size of compressed records":66331,"written slots coalesced":0,"yields waiting for previous log file close":0},"oplog":{"visibility timestamp":0},"perf":{"file system read latency histogram (bucket 1) - 10-49ms":0,"file system read latency histogram (bucket 2) - 50-99ms":0,"file system read latency histogram (bucket 3) - 100-249ms":0,"file system read latency histogram (bucket 4) - 250-499ms":0,"file system read latency histogram (bucket 5) - 500-999ms":0,"file system read latency histogram (bucket 6) - 1000ms+":0,"file system write latency histogram (bucket 1) - 10-49ms":0,"file system write latency histogram (bucket 2) - 50-99ms":0,"file system write latency histogram (bucket 3) - 100-249ms":0,"file system write latency histogram (bucket 4) - 250-499ms":0,"file system write latency histogram (bucket 5) - 500-999ms":0,"file system write latency histogram (bucket 6) - 1000ms+":0,"operation read latency histogram (bucket 1) - 100-249us":1,"operation read latency histogram (bucket 2) - 250-499us":1,"operation read latency histogram (bucket 3) - 500-999us":0,"operation read latency histogram (bucket 4) - 1000-9999us":0,"operation read latency histogram (bucket 5) - 10000us+":0,"operation write latency histogram (bucket 1) - 100-249us":1,"operation write latency histogram (bucket 2) - 250-499us":0,"operation write latency histogram (bucket 3) - 500-999us":0,"operation write latency histogram (bucket 4) - 1000-9999us":0,"operation write latency histogram (bucket 5) - 10000us+":0},"reconciliation":{"approximate byte size of timestamps in pages written":0,"approximate byte size of transaction IDs in pages written":1184,"fast-path pages deleted":0,"maximum seconds spent in a reconciliation call":0,"page reconciliation calls":374,"page reconciliation calls for eviction":71,"page reconciliation calls that resulted in values with prepared transaction metadata":0,"page reconciliation calls that resulted in values with timestamps":0,"page reconciliation calls that resulted in values with transaction ids":68,"pages deleted":218,"pages 
written including an aggregated newest start durable timestamp ":0,"pages written including an aggregated newest stop durable timestamp ":0,"pages written including an aggregated newest stop timestamp ":0,"pages written including an aggregated newest stop transaction ID":0,"pages written including an aggregated newest transaction ID ":0,"pages written including an aggregated oldest start timestamp ":0,"pages written including an aggregated prepare":0,"pages written including at least one prepare state":0,"pages written including at least one start durable timestamp":0,"pages written including at least one start timestamp":0,"pages written including at least one start transaction ID":68,"pages written including at least one stop durable timestamp":0,"pages written including at least one stop timestamp":0,"pages written including at least one stop transaction ID":0,"records written including a prepare state":0,"records written including a start durable timestamp":0,"records written including a start timestamp":0,"records written including a start transaction ID":148,"records written including a stop durable timestamp":0,"records written including a stop timestamp":0,"records written including a stop transaction ID":0,"split bytes currently awaiting free":0,"split objects currently awaiting free":0},"session":{"open session count":14,"session query timestamp calls":0,"table alter failed calls":0,"table alter successful calls":0,"table alter unchanged and skipped":0,"table compact failed calls":0,"table compact successful calls":0,"table create failed calls":0,"table create successful calls":1,"table drop failed calls":0,"table drop successful calls":0,"table rename failed calls":0,"table rename successful calls":0,"table salvage failed calls":0,"table salvage successful calls":0,"table truncate failed calls":0,"table truncate successful calls":0,"table verify failed calls":0,"table verify successful calls":0},"snapshot-window-settings":{"cache pressure percentage threshold":95,"current available snapshots window size in seconds":0,"current cache pressure percentage":0,"latest majority snapshot timestamp available":"Jan 1 00:00:00:0","max target available snapshots window size in seconds":5,"oldest majority snapshot timestamp available":"Jan 1 00:00:00:0","target available snapshots window size in seconds":5,"total number of SnapshotTooOld errors":0},"thread-state":{"active filesystem fsync calls":0,"active filesystem read calls":0,"active filesystem write calls":0},"thread-yield":{"application thread time evicting (usecs)":0,"application thread time waiting for cache (usecs)":0,"connection close blocked waiting for transaction state stabilization":0,"connection close yielded for lsm manager shutdown":0,"data handle lock yielded":0,"get reference for page index and slot time sleeping (usecs)":0,"log server sync yielded for log write":0,"page access yielded due to prepare state change":0,"page acquire busy blocked":0,"page acquire eviction blocked":0,"page acquire locked blocked":0,"page acquire read blocked":0,"page acquire time sleeping (usecs)":0,"page delete rollback time sleeping for state change (usecs)":0,"page reconciliation yielded due to child modification":0},"transaction":{"Number of prepared updates":0,"durable timestamp queue entries walked":0,"durable timestamp queue insert to empty":0,"durable timestamp queue inserts to head":0,"durable timestamp queue inserts total":0,"durable timestamp queue length":0,"prepared transactions":0,"prepared transactions committed":0,"prepared 
transactions currently active":0,"prepared transactions rolled back":0,"query timestamp calls":4301,"race to read prepared update retry":0,"read timestamp queue entries walked":0,"read timestamp queue insert to empty":0,"read timestamp queue inserts to head":0,"read timestamp queue inserts total":0,"read timestamp queue length":0,"rollback to stable calls":0,"rollback to stable hs records with stop timestamps older than newer records":0,"rollback to stable keys removed":0,"rollback to stable keys restored":0,"rollback to stable pages visited":1,"rollback to stable restored tombstones from history store":0,"rollback to stable sweeping history store keys":0,"rollback to stable tree walk skipping pages":0,"rollback to stable updates aborted":0,"rollback to stable updates removed from history store":0,"set timestamp calls":0,"set timestamp durable calls":0,"set timestamp durable updates":0,"set timestamp oldest calls":0,"set timestamp oldest updates":0,"set timestamp stable calls":0,"set timestamp stable updates":0,"transaction begins":180,"transaction checkpoint currently running":0,"transaction checkpoint generation":73,"transaction checkpoint history store file duration (usecs)":2,"transaction checkpoint max time (msecs)":45,"transaction checkpoint min time (msecs)":8,"transaction checkpoint most recent duration for gathering all handles (usecs)":356,"transaction checkpoint most recent duration for gathering applied handles (usecs)":147,"transaction checkpoint most recent duration for gathering skipped handles (usecs)":33,"transaction checkpoint most recent handles applied":1,"transaction checkpoint most recent handles skipped":9,"transaction checkpoint most recent handles walked":20,"transaction checkpoint most recent time (msecs)":12,"transaction checkpoint prepare currently running":0,"transaction checkpoint prepare max time (msecs)":1,"transaction checkpoint prepare min time (msecs)":0,"transaction checkpoint prepare most recent time (msecs)":0,"transaction checkpoint prepare total time (msecs)":1,"transaction checkpoint scrub dirty target":0,"transaction checkpoint scrub time (msecs)":0,"transaction checkpoint total time (msecs)":1373,"transaction checkpoints":72,"transaction checkpoints skipped because database was clean":0,"transaction failures due to history store":0,"transaction fsync calls for checkpoint after allocating the transaction ID":72,"transaction fsync duration for checkpoint after allocating the transaction ID (usecs)":4259,"transaction range of IDs currently pinned":0,"transaction range of IDs currently pinned by a checkpoint":0,"transaction range of timestamps currently pinned":0,"transaction range of timestamps pinned by a checkpoint":0,"transaction range of timestamps pinned by the oldest active read timestamp":0,"transaction range of timestamps pinned by the oldest timestamp":0,"transaction read timestamp of the oldest active reader":0,"transaction sync calls":0,"transactions committed":2,"transactions rolled back":178,"update conflicts":0},"uri":"statistics:"}}
diff --git a/src/go/plugins/mongodb/testdata/top.json b/src/go/plugins/mongodb/testdata/top.json
deleted file mode 100644
index ec30dbda42f..00000000000
--- a/src/go/plugins/mongodb/testdata/top.json
+++ /dev/null
@@ -1 +0,0 @@
-{"ok":1,"totals":{"admin.system.version":{"commands":{"count":0,"time":0},"getmore":{"count":0,"time":0},"insert":{"count":0,"time":0},"queries":{"count":0,"time":0},"readLock":{"count":1,"time":162},"remove":{"count":0,"time":0},"total":{"count":1,"time":162},"update":{"count":0,"time":0},"writeLock":{"count":0,"time":0}},"note":"all times in microseconds"}}
diff --git a/src/go/plugins/plugins_darwin.go b/src/go/plugins/plugins_darwin.go
index fd80aa8fceb..8a8b58a26b3 100644
--- a/src/go/plugins/plugins_darwin.go
+++ b/src/go/plugins/plugins_darwin.go
@@ -25,7 +25,6 @@ import (
_ "zabbix.com/plugins/log"
_ "zabbix.com/plugins/memcached"
_ "zabbix.com/plugins/modbus"
- _ "zabbix.com/plugins/mongodb"
_ "zabbix.com/plugins/mysql"
_ "zabbix.com/plugins/net/dns"
_ "zabbix.com/plugins/net/tcp"
diff --git a/src/go/plugins/plugins_linux.go b/src/go/plugins/plugins_linux.go
index d0e6fd33620..5d7ce08bb5f 100644
--- a/src/go/plugins/plugins_linux.go
+++ b/src/go/plugins/plugins_linux.go
@@ -26,7 +26,6 @@ import (
_ "zabbix.com/plugins/log"
_ "zabbix.com/plugins/memcached"
_ "zabbix.com/plugins/modbus"
- _ "zabbix.com/plugins/mongodb"
_ "zabbix.com/plugins/mqtt"
_ "zabbix.com/plugins/mysql"
_ "zabbix.com/plugins/net/dns"
diff --git a/src/go/plugins/plugins_windows.go b/src/go/plugins/plugins_windows.go
index 95b9060aaae..ef56d97214c 100644
--- a/src/go/plugins/plugins_windows.go
+++ b/src/go/plugins/plugins_windows.go
@@ -24,7 +24,6 @@ import (
_ "zabbix.com/plugins/log"
_ "zabbix.com/plugins/memcached"
_ "zabbix.com/plugins/modbus"
- _ "zabbix.com/plugins/mongodb"
_ "zabbix.com/plugins/mqtt"
_ "zabbix.com/plugins/mysql"
_ "zabbix.com/plugins/net/dns"
diff --git a/src/libs/zbxcommon/comms.c b/src/libs/zbxcommon/comms.c
index ba3277885a4..d72125e31bf 100644
--- a/src/libs/zbxcommon/comms.c
+++ b/src/libs/zbxcommon/comms.c
@@ -18,98 +18,6 @@
**/
#include "common.h"
-#include "base64.h"
-
-int comms_parse_response(char *xml, char *host, size_t host_len, char *key, size_t key_len,
- char *data, size_t data_len, char *lastlogsize, size_t lastlogsize_len,
- char *timestamp, size_t timestamp_len, char *source, size_t source_len,
- char *severity, size_t severity_len)
-{
- int i, ret = SUCCEED;
- char *data_b64 = NULL;
-
- assert(NULL != host && 0 != host_len);
- assert(NULL != key && 0 != key_len);
- assert(NULL != data && 0 != data_len);
- assert(NULL != lastlogsize && 0 != lastlogsize_len);
- assert(NULL != timestamp && 0 != timestamp_len);
- assert(NULL != source && 0 != source_len);
- assert(NULL != severity && 0 != severity_len);
-
- if (SUCCEED == xml_get_data_dyn(xml, "host", &data_b64))
- {
- str_base64_decode(data_b64, host, (int)host_len - 1, &i);
- host[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- {
- *host = '\0';
- ret = FAIL;
- }
-
- if (SUCCEED == xml_get_data_dyn(xml, "key", &data_b64))
- {
- str_base64_decode(data_b64, key, (int)key_len - 1, &i);
- key[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- {
- *key = '\0';
- ret = FAIL;
- }
-
- if (SUCCEED == xml_get_data_dyn(xml, "data", &data_b64))
- {
- str_base64_decode(data_b64, data, (int)data_len - 1, &i);
- data[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- {
- *data = '\0';
- ret = FAIL;
- }
-
- if (SUCCEED == xml_get_data_dyn(xml, "lastlogsize", &data_b64))
- {
- str_base64_decode(data_b64, lastlogsize, (int)lastlogsize_len - 1, &i);
- lastlogsize[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- *lastlogsize = '\0';
-
- if (SUCCEED == xml_get_data_dyn(xml, "timestamp", &data_b64))
- {
- str_base64_decode(data_b64, timestamp, (int)timestamp_len - 1, &i);
- timestamp[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- *timestamp = '\0';
-
- if (SUCCEED == xml_get_data_dyn(xml, "source", &data_b64))
- {
- str_base64_decode(data_b64, source, (int)source_len - 1, &i);
- source[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- *source = '\0';
-
- if (SUCCEED == xml_get_data_dyn(xml, "severity", &data_b64))
- {
- str_base64_decode(data_b64, severity, (int)severity_len - 1, &i);
- severity[i] = '\0';
- xml_free_data_dyn(&data_b64);
- }
- else
- *severity = '\0';
-
- return ret;
-}
/******************************************************************************
* *
diff --git a/src/libs/zbxcommon/str.c b/src/libs/zbxcommon/str.c
index f3fa6aebc47..e3e8b032136 100644
--- a/src/libs/zbxcommon/str.c
+++ b/src/libs/zbxcommon/str.c
@@ -2326,112 +2326,6 @@ void zbx_replace_invalid_utf8(char *text)
*out = '\0';
}
-/******************************************************************************
- * *
- * Purpose: decodes 3-byte utf-8 sequence *
- * *
- * Parameters: ptr - [IN] pointer to the 3 byte sequence *
- * out - [OUT] the decoded value *
- * *
- * Return value: SUCCEED on success *
- * FAIL on failure *
- * *
- ******************************************************************************/
-static int utf8_decode_3byte_sequence(const char *ptr, zbx_uint32_t *out)
-{
- *out = ((unsigned char)*ptr++ & 0xF) << 12;
- if (0x80 != (*ptr & 0xC0))
- return FAIL;
-
- *out |= ((unsigned char)*ptr++ & 0x3F) << 6;
- if (0x80 != (*ptr & 0xC0))
- return FAIL;
-
- *out |= ((unsigned char)*ptr & 0x3F);
- return SUCCEED;
-}
-
-/******************************************************************************
- * *
- * Purpose: convert cesu8 encoded string to utf8 *
- * *
- * Parameters: cesu8 - [IN] pointer to the first char of NULL terminated CESU8*
- * string *
- * utf8 - [OUT] on success, pointer to pointer to the first char *
- * of allocated NULL terminated UTF8 string *
- * *
- * Return value: SUCCEED on success *
- * FAIL on failure *
- * *
- ******************************************************************************/
-int zbx_cesu8_to_utf8(const char *cesu8, char **utf8)
-{
- const char *in, *end;
- char *out;
- size_t len;
-
- len = strlen(cesu8);
- out = *utf8 = zbx_malloc(*utf8, len + 1);
- end = cesu8 + len;
-
- for (in = cesu8; in < end;)
- {
- if (0x7f >= (unsigned char)*in)
- {
- *out++ = *in++;
- continue;
- }
-
- if (0xdf >= (unsigned char)*in)
- {
- if (2 > end - in)
- goto fail;
-
- *out++ = *in++;
- *out++ = *in++;
- continue;
- }
-
- if (0xef >= (unsigned char)*in)
- {
- zbx_uint32_t c1, c2, u;
-
- if (3 > end - in || FAIL == utf8_decode_3byte_sequence(in, &c1))
- goto fail;
-
- if (0xd800 > c1 || 0xdbff < c1)
- {
- /* normal 3-byte sequence */
- *out++ = *in++;
- *out++ = *in++;
- *out++ = *in++;
- continue;
- }
-
- /* decode unicode supplementary character represented as surrogate pair */
- in += 3;
- if (3 > end - in || FAIL == utf8_decode_3byte_sequence(in, &c2) || 0xdc00 > c2 || 0xdfff < c2)
- goto fail;
-
- u = 0x10000 + ((((zbx_uint32_t)c1 & 0x3ff) << 10) | (c2 & 0x3ff));
- *out++ = 0xf0 | u >> 18;
- *out++ = 0x80 | (u >> 12 & 0x3f);
- *out++ = 0x80 | (u >> 6 & 0x3f);
- *out++ = 0x80 | (u & 0x3f);
- in += 3;
- continue;
- }
-
- /* the four-byte UTF-8 style supplementary character sequence is not supported by CESU-8 */
- goto fail;
- }
- *out = '\0';
- return SUCCEED;
-fail:
- zbx_free(*utf8);
- return FAIL;
-}
-
void dos2unix(char *str)
{
char *o = str;
diff --git a/src/libs/zbxdbhigh/db.c b/src/libs/zbxdbhigh/db.c
index a583d0ae833..8728afda42e 100644
--- a/src/libs/zbxdbhigh/db.c
+++ b/src/libs/zbxdbhigh/db.c
@@ -2107,9 +2107,8 @@ int DBtable_exists(const char *table_name)
#elif defined(HAVE_ORACLE)
result = DBselect(
"select 1"
- " from tab"
- " where tabtype='TABLE'"
- " and lower(tname)='%s'",
+ " from all_tables"
+ " where lower(table_name)='%s'",
table_name_esc);
#elif defined(HAVE_POSTGRESQL)
result = DBselect(
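
The Oracle branch of DBtable_exists() above moves from the legacy user-level tab view (tname/tabtype columns, owned objects only) to the standard all_tables dictionary view, which lists tables the connected user can access. A minimal standalone sketch of the two queries, with a hypothetical pre-escaped table name, for comparison:

/*
 * Standalone sketch (not part of the patch): builds the table-existence
 * query used on Oracle before and after this change.  "my_table" stands in
 * for the already-escaped name that DBtable_exists() receives.
 */
#include <stdio.h>

int main(void)
{
	const char	*table_name_esc = "my_table";	/* hypothetical, pre-escaped */
	char		old_sql[256], new_sql[256];

	/* legacy USER-level view: only objects owned by the connected schema */
	snprintf(old_sql, sizeof(old_sql),
			"select 1 from tab where tabtype='TABLE' and lower(tname)='%s'",
			table_name_esc);

	/* standard dictionary view: tables accessible to the connected user */
	snprintf(new_sql, sizeof(new_sql),
			"select 1 from all_tables where lower(table_name)='%s'",
			table_name_esc);

	printf("%s\n%s\n", old_sql, new_sql);

	return 0;
}
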
diff --git a/src/libs/zbxdbupgrade/dbupgrade_6000.c b/src/libs/zbxdbupgrade/dbupgrade_6000.c
index 8888decca6e..d87cda598ed 100644
--- a/src/libs/zbxdbupgrade/dbupgrade_6000.c
+++ b/src/libs/zbxdbupgrade/dbupgrade_6000.c
@@ -59,6 +59,154 @@ static int DBpatch_6000002(void)
return SUCCEED;
}
+#define HTTPSTEP_ITEM_TYPE_RSPCODE 0
+#define HTTPSTEP_ITEM_TYPE_TIME 1
+#define HTTPSTEP_ITEM_TYPE_IN 2
+#define HTTPSTEP_ITEM_TYPE_LASTSTEP 3
+#define HTTPSTEP_ITEM_TYPE_LASTERROR 4
+
+static int DBpatch_6000003(void)
+{
+ DB_ROW row;
+ DB_RESULT result;
+ int ret = SUCCEED;
+ char *sql = NULL;
+ size_t sql_alloc = 0, sql_offset = 0, out_alloc = 0;
+ char *out = NULL;
+
+ if (ZBX_PROGRAM_TYPE_SERVER != program_type)
+ return SUCCEED;
+
+ DBbegin_multiple_update(&sql, &sql_alloc, &sql_offset);
+
+ result = DBselect(
+ "select hi.itemid,hi.type,ht.name"
+ " from httptestitem hi,httptest ht"
+ " where hi.httptestid=ht.httptestid");
+
+ while (SUCCEED == ret && NULL != (row = DBfetch(result)))
+ {
+ zbx_uint64_t itemid;
+ char *esc;
+ size_t out_offset = 0;
+ unsigned char type;
+
+ ZBX_STR2UINT64(itemid, row[0]);
+ ZBX_STR2UCHAR(type, row[1]);
+
+ switch (type)
+ {
+ case HTTPSTEP_ITEM_TYPE_IN:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Download speed for scenario \"%s\".", row[2]);
+ break;
+ case HTTPSTEP_ITEM_TYPE_LASTSTEP:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Failed step of scenario \"%s\".", row[2]);
+ break;
+ case HTTPSTEP_ITEM_TYPE_LASTERROR:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Last error message of scenario \"%s\".", row[2]);
+ break;
+ }
+ esc = DBdyn_escape_field("items", "name", out);
+ zbx_snprintf_alloc(&sql, &sql_alloc, &sql_offset, "update items set name='%s' where itemid="
+ ZBX_FS_UI64 ";\n", esc, itemid);
+ zbx_free(esc);
+
+ ret = DBexecute_overflowed_sql(&sql, &sql_alloc, &sql_offset);
+ }
+ DBfree_result(result);
+
+ DBend_multiple_update(&sql, &sql_alloc, &sql_offset);
+
+ if (SUCCEED == ret && 16 < sql_offset)
+ {
+ if (ZBX_DB_OK > DBexecute("%s", sql))
+ ret = FAIL;
+ }
+
+ zbx_free(sql);
+ zbx_free(out);
+
+ return ret;
+}
+
+static int DBpatch_6000004(void)
+{
+ DB_ROW row;
+ DB_RESULT result;
+ int ret = SUCCEED;
+ char *sql = NULL;
+ size_t sql_alloc = 0, sql_offset = 0, out_alloc = 0;
+ char *out = NULL;
+
+ if (ZBX_PROGRAM_TYPE_SERVER != program_type)
+ return SUCCEED;
+
+ DBbegin_multiple_update(&sql, &sql_alloc, &sql_offset);
+
+ result = DBselect(
+ "select hi.itemid,hi.type,hs.name,ht.name"
+ " from httpstepitem hi,httpstep hs,httptest ht"
+ " where hi.httpstepid=hs.httpstepid"
+ " and hs.httptestid=ht.httptestid");
+
+ while (SUCCEED == ret && NULL != (row = DBfetch(result)))
+ {
+ zbx_uint64_t itemid;
+ char *esc;
+ size_t out_offset = 0;
+ unsigned char type;
+
+ ZBX_STR2UINT64(itemid, row[0]);
+ ZBX_STR2UCHAR(type, row[1]);
+
+ switch (type)
+ {
+ case HTTPSTEP_ITEM_TYPE_IN:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Download speed for step \"%s\" of scenario \"%s\".", row[2], row[3]);
+ break;
+ case HTTPSTEP_ITEM_TYPE_TIME:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Response time for step \"%s\" of scenario \"%s\".", row[2], row[3]);
+ break;
+ case HTTPSTEP_ITEM_TYPE_RSPCODE:
+ zbx_snprintf_alloc(&out, &out_alloc, &out_offset,
+ "Response code for step \"%s\" of scenario \"%s\".", row[2], row[3]);
+ break;
+ }
+
+ esc = DBdyn_escape_field("items", "name", out);
+ zbx_snprintf_alloc(&sql, &sql_alloc, &sql_offset, "update items set name='%s' where itemid="
+ ZBX_FS_UI64 ";\n", esc, itemid);
+ zbx_free(esc);
+
+ ret = DBexecute_overflowed_sql(&sql, &sql_alloc, &sql_offset);
+ }
+ DBfree_result(result);
+
+ DBend_multiple_update(&sql, &sql_alloc, &sql_offset);
+
+ if (SUCCEED == ret && 16 < sql_offset)
+ {
+ if (ZBX_DB_OK > DBexecute("%s", sql))
+ ret = FAIL;
+ }
+
+ zbx_free(sql);
+ zbx_free(out);
+
+ return ret;
+}
+
+#undef HTTPSTEP_ITEM_TYPE_RSPCODE
+#undef HTTPSTEP_ITEM_TYPE_TIME
+#undef HTTPSTEP_ITEM_TYPE_IN
+#undef HTTPSTEP_ITEM_TYPE_LASTSTEP
+#undef HTTPSTEP_ITEM_TYPE_LASTERROR
+
#endif
DBPATCH_START(6000)
@@ -68,5 +216,7 @@ DBPATCH_START(6000)
DBPATCH_ADD(6000000, 0, 1)
DBPATCH_ADD(6000001, 0, 0)
DBPATCH_ADD(6000002, 0, 0)
+DBPATCH_ADD(6000003, 0, 0)
+DBPATCH_ADD(6000004, 0, 0)
DBPATCH_END()
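
DBpatch_6000003() and DBpatch_6000004() above rewrite the names of web-scenario and web-step items from a fixed per-type template and batch the updates through DBexecute_overflowed_sql(). A minimal sketch of the statement one iteration produces, with hypothetical sample data (the real patch additionally escapes the name via DBdyn_escape_field()):

/*
 * Standalone sketch (not part of the patch): the per-row UPDATE that
 * DBpatch_6000004() appends for a step item, using made-up sample data.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long	itemid = 24863;		/* hypothetical item id */
	const char		*step = "Login", *scenario = "Frontend availability";
	char			name[256], sql[512];

	/* HTTPSTEP_ITEM_TYPE_TIME -> "Response time for step ... of scenario ..." */
	snprintf(name, sizeof(name), "Response time for step \"%s\" of scenario \"%s\".",
			step, scenario);
	snprintf(sql, sizeof(sql), "update items set name='%s' where itemid=%llu;",
			name, itemid);

	printf("%s\n", sql);

	return 0;
}
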
diff --git a/src/libs/zbxembed/console.c b/src/libs/zbxembed/console.c
index b2a9987617a..dd8f1bc2abc 100644
--- a/src/libs/zbxembed/console.c
+++ b/src/libs/zbxembed/console.c
@@ -75,7 +75,7 @@ static duk_ret_t es_log_message(duk_context *ctx, int level)
if (0 == duk_is_null_or_undefined(ctx, -1))
{
- if (SUCCEED != zbx_cesu8_to_utf8(msg_raw, &msg_output))
+ if (SUCCEED != es_duktape_string_decode(msg_raw, &msg_output))
{
msg_output = zbx_strdup(msg_output, msg_raw);
zbx_replace_invalid_utf8(msg_output);
diff --git a/src/libs/zbxembed/embed.c b/src/libs/zbxembed/embed.c
index 7e627d602c2..e392a678e8b 100644
--- a/src/libs/zbxembed/embed.c
+++ b/src/libs/zbxembed/embed.c
@@ -121,6 +121,127 @@ static void es_free(void *udata, void *ptr)
/******************************************************************************
* *
+ * Purpose: decodes 3-byte utf-8 sequence *
+ * *
+ * Parameters: ptr - [IN] pointer to the 3 byte sequence *
+ * out - [OUT] the decoded value *
+ * *
+ * Return value: SUCCEED *
+ * FAIL *
+ * *
+ ******************************************************************************/
+static int utf8_decode_3byte_sequence(const char *ptr, zbx_uint32_t *out)
+{
+ *out = ((unsigned char)*ptr++ & 0xFu) << 12;
+ if (0x80 != (*ptr & 0xC0))
+ return FAIL;
+
+ *out |= ((unsigned char)*ptr++ & 0x3Fu) << 6;
+ if (0x80 != (*ptr & 0xC0))
+ return FAIL;
+
+ *out |= ((unsigned char)*ptr & 0x3Fu);
+
+ return SUCCEED;
+}
+
+/******************************************************************************
+ * *
+ * Purpose: decodes duktape string into utf-8 *
+ * *
+ * Parameters: duk_str - [IN] pointer to the first char of NULL terminated *
+ * Duktape string *
+ * out_str - [OUT] on success, pointer to pointer to the first *
+ * char of allocated NULL terminated UTF8 string *
+ * *
+ * Return value: SUCCEED *
+ * FAIL *
+ * *
+ ******************************************************************************/
+int es_duktape_string_decode(const char *duk_str, char **out_str)
+{
+ const char *in, *end;
+ char *out;
+ size_t len;
+
+ len = strlen(duk_str);
+ out = *out_str = zbx_malloc(*out_str, len + 1);
+ end = duk_str + len;
+
+ for (in = duk_str; in < end;)
+ {
+ if (0x7f >= (unsigned char)*in)
+ {
+ *out++ = *in++;
+ continue;
+ }
+
+ if (0xdf >= (unsigned char)*in)
+ {
+ if (2 > end - in)
+ goto fail;
+
+ *out++ = *in++;
+ *out++ = *in++;
+ continue;
+ }
+
+ if (0xef >= (unsigned char)*in)
+ {
+ zbx_uint32_t c1, c2, u;
+
+ if (3 > end - in || FAIL == utf8_decode_3byte_sequence(in, &c1))
+ goto fail;
+
+ if (0xd800 > c1 || 0xdbff < c1)
+ {
+ /* normal 3-byte sequence */
+ *out++ = *in++;
+ *out++ = *in++;
+ *out++ = *in++;
+ continue;
+ }
+
+ /* decode unicode supplementary character represented as surrogate pair */
+ in += 3;
+ if (3 > end - in || FAIL == utf8_decode_3byte_sequence(in, &c2) || 0xdc00 > c2 || 0xdfff < c2)
+ goto fail;
+
+ u = 0x10000 + ((((zbx_uint32_t)c1 & 0x3ff) << 10) | (c2 & 0x3ff));
+ *out++ = (char)(0xf0 | u >> 18);
+ *out++ = (char)(0x80 | (u >> 12 & 0x3f));
+ *out++ = (char)(0x80 | (u >> 6 & 0x3f));
+ *out++ = (char)(0x80 | (u & 0x3f));
+ in += 3;
+ continue;
+ }
+
+ /* duktape can use the four-byte UTF-8 style supplementary character sequence */
+ if (0xf0 >= (unsigned char)*in)
+ {
+ if (4 > end - in)
+ goto fail;
+
+ *out++ = *in++;
+ *out++ = *in++;
+ *out++ = *in++;
+ *out++ = *in++;
+ continue;
+ }
+
+ goto fail;
+ }
+ *out = '\0';
+
+ return SUCCEED;
+fail:
+ zbx_free(*out_str);
+
+ return FAIL;
+}
+
+/******************************************************************************
+ * *
* Purpose: timeout checking callback *
* *
******************************************************************************/
@@ -415,8 +536,8 @@ out:
* bytecode parameters. *
* *
******************************************************************************/
-int zbx_es_execute(zbx_es_t *es, const char *script, const char *code, int size, const char *param, char **script_ret,
- char **error)
+int zbx_es_execute(zbx_es_t *es, const char *script, const char *code, int size, const char *param,
+ char **script_ret, char **error)
{
void *buffer;
volatile int ret = FAIL;
@@ -492,8 +613,11 @@ int zbx_es_execute(zbx_es_t *es, const char *script, const char *code, int size,
{
char *output = NULL;
- if (SUCCEED != (ret = zbx_cesu8_to_utf8(duk_safe_to_string(es->env->ctx, -1), &output)))
+ if (SUCCEED != (ret = es_duktape_string_decode(
+ duk_safe_to_string(es->env->ctx, -1), &output)))
+ {
*error = zbx_strdup(*error, "could not convert return value to utf8");
+ }
else
zabbix_log(LOG_LEVEL_DEBUG, "%s() output:'%s'", __func__, output);
diff --git a/src/libs/zbxembed/embed.h b/src/libs/zbxembed/embed.h
index a0a360c985d..b807247c5b9 100644
--- a/src/libs/zbxembed/embed.h
+++ b/src/libs/zbxembed/embed.h
@@ -52,4 +52,6 @@ struct zbx_es_env
zbx_es_env_t *zbx_es_get_env(duk_context *ctx);
+int es_duktape_string_decode(const char *duk_str, char **out_str);
+
#endif /* ZABBIX_EMBED_H */
diff --git a/src/libs/zbxembed/global.c b/src/libs/zbxembed/global.c
index 9dee353d98e..6c51f2dc439 100644
--- a/src/libs/zbxembed/global.c
+++ b/src/libs/zbxembed/global.c
@@ -40,7 +40,7 @@ static duk_ret_t es_btoa(duk_context *ctx)
{
char *str = NULL, *b64str = NULL;
- if (SUCCEED != zbx_cesu8_to_utf8(duk_require_string(ctx, 0), &str))
+ if (SUCCEED != es_duktape_string_decode(duk_require_string(ctx, 0), &str))
return duk_error(ctx, DUK_RET_TYPE_ERROR, "cannot convert value to utf8");
str_base64_encode_dyn(str, &b64str, (int)strlen(str));
@@ -66,7 +66,7 @@ static duk_ret_t es_atob(duk_context *ctx)
char *buffer = NULL, *str = NULL;
int out_size, buffer_size;
- if (SUCCEED != zbx_cesu8_to_utf8(duk_require_string(ctx, 0), &str))
+ if (SUCCEED != es_duktape_string_decode(duk_require_string(ctx, 0), &str))
return duk_error(ctx, DUK_RET_TYPE_ERROR, "cannot convert value to utf8");
buffer_size = (int)strlen(str) * 3 / 4 + 1;
diff --git a/src/libs/zbxembed/httprequest.c b/src/libs/zbxembed/httprequest.c
index 9a56a0952ef..f3b12ccf9c9 100644
--- a/src/libs/zbxembed/httprequest.c
+++ b/src/libs/zbxembed/httprequest.c
@@ -200,7 +200,7 @@ static duk_ret_t es_httprequest_add_header(duk_context *ctx)
if (NULL == (request = es_httprequest(ctx)))
return duk_error(ctx, DUK_RET_EVAL_ERROR, "internal scripting error: null object");
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 0), &utf8))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 0), &utf8))
{
err_index = duk_push_error_object(ctx, DUK_RET_TYPE_ERROR, "cannot convert header to utf8");
goto out;
@@ -266,7 +266,7 @@ static duk_ret_t es_httprequest_query(duk_context *ctx, const char *http_request
goto out;
}
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 0), &url))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 0), &url))
{
err_index = duk_push_error_object(ctx, DUK_RET_TYPE_ERROR, "cannot convert URL to utf8");
goto out;
@@ -274,7 +274,7 @@ static duk_ret_t es_httprequest_query(duk_context *ctx, const char *http_request
if (0 == duk_is_null_or_undefined(ctx, 1))
{
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 1), &contents))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 1), &contents))
{
err_index = duk_push_error_object(ctx, DUK_RET_TYPE_ERROR,
"cannot convert request contents to utf8");
@@ -704,7 +704,7 @@ static duk_ret_t es_httprequest_set_httpauth(duk_context *ctx)
if (0 == duk_is_null_or_undefined(ctx, 1))
{
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 1), &username))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 1), &username))
{
err_index = duk_push_error_object(ctx, DUK_RET_TYPE_ERROR, "cannot convert username to utf8");
goto out;
@@ -713,7 +713,7 @@ static duk_ret_t es_httprequest_set_httpauth(duk_context *ctx)
if (0 == duk_is_null_or_undefined(ctx, 2))
{
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 2), &password))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 2), &password))
{
err_index = duk_push_error_object(ctx, DUK_RET_TYPE_ERROR, "cannot convert password to utf8");
goto out;
diff --git a/src/libs/zbxembed/zabbix.c b/src/libs/zbxembed/zabbix.c
index 8bac5590949..4422790bb7f 100644
--- a/src/libs/zbxembed/zabbix.c
+++ b/src/libs/zbxembed/zabbix.c
@@ -69,7 +69,7 @@ static duk_ret_t es_zabbix_log(duk_context *ctx)
level = duk_to_int(ctx, 0);
- if (SUCCEED != zbx_cesu8_to_utf8(duk_to_string(ctx, 1), &message))
+ if (SUCCEED != es_duktape_string_decode(duk_to_string(ctx, 1), &message))
{
message = zbx_strdup(message, duk_to_string(ctx, 1));
zbx_replace_invalid_utf8(message);
diff --git a/src/libs/zbxeval/parse.c b/src/libs/zbxeval/parse.c
index 35fae087a63..dbc97ab7635 100644
--- a/src/libs/zbxeval/parse.c
+++ b/src/libs/zbxeval/parse.c
@@ -223,12 +223,14 @@ static int eval_parse_constant(zbx_eval_context_t *ctx, size_t pos, zbx_eval_tok
{
zbx_token_t tok;
size_t offset = pos;
- zbx_token_type_t type = 0;
+ zbx_token_type_t type = 0, last_type = 0;
do
{
if ('{' == (ctx->expression[offset]))
{
+ last_type = ZBX_TOKEN_MACRO;
+
if (SUCCEED != eval_parse_macro(ctx, (int)offset, &tok))
break;
@@ -271,9 +273,9 @@ static int eval_parse_constant(zbx_eval_context_t *ctx, size_t pos, zbx_eval_tok
goto out;
}
}
- else if (SUCCEED == eval_parse_number(ctx, offset, &offset))
+ else if (ZBX_EVAL_TOKEN_VAR_NUM != last_type && SUCCEED == eval_parse_number(ctx, offset, &offset))
{
- type = ZBX_EVAL_TOKEN_VAR_NUM;
+ last_type = type = ZBX_EVAL_TOKEN_VAR_NUM;
offset++;
}
else if (SUCCEED == eval_is_compound_number_char(ctx->expression[offset], offset - pos))
diff --git a/src/zabbix_java/src/com/zabbix/gateway/GeneralInformation.java b/src/zabbix_java/src/com/zabbix/gateway/GeneralInformation.java
index a59660d140d..837f54a4d6d 100644
--- a/src/zabbix_java/src/com/zabbix/gateway/GeneralInformation.java
+++ b/src/zabbix_java/src/com/zabbix/gateway/GeneralInformation.java
@@ -24,7 +24,7 @@ class GeneralInformation
static final String APPLICATION_NAME = "Zabbix Java Gateway";
static final String REVISION_DATE = "30 May 2022";
static final String REVISION = "{ZABBIX_REVISION}";
- static final String VERSION = "6.0.5";
+ static final String VERSION = "6.0.6rc1";
static void printVersion()
{
diff --git a/src/zabbix_js/Makefile.am b/src/zabbix_js/Makefile.am
index c8b360e5619..ec8465fc1b8 100644
--- a/src/zabbix_js/Makefile.am
+++ b/src/zabbix_js/Makefile.am
@@ -20,7 +20,7 @@ zabbix_js_LDADD = \
$(top_srcdir)/src/libs/zbxcompress/libzbxcompress.a \
$(top_srcdir)/src/libs/zbxhttp/libzbxhttp.a \
$(top_srcdir)/src/libs/zbxxml/libzbxxml.a
-
-zabbix_js_LDADD += @ZBXJS_LIBS@
-zabbix_js_LDFLAGS = @ZBXJS_LDFLAGS@
+zabbix_js_LDADD += @ZBXJS_LIBS@ $(LIBXML2_LIBS)
+
+zabbix_js_LDFLAGS = @ZBXJS_LDFLAGS@ $(LIBXML2_LDFLAGS)
diff --git a/src/zabbix_proxy/Makefile.am b/src/zabbix_proxy/Makefile.am
index 5a6c77d98b9..cdcda13c83b 100644
--- a/src/zabbix_proxy/Makefile.am
+++ b/src/zabbix_proxy/Makefile.am
@@ -67,6 +67,7 @@ zabbix_proxy_LDADD = \
$(top_builddir)/src/libs/zbxcommon/libzbxcommon.a \
$(top_builddir)/src/libs/zbxcrypto/libzbxcrypto.a \
$(top_builddir)/src/libs/zbxcomms/libzbxcomms.a \
+ $(top_builddir)/src/libs/zbxcommon/libzbxcommon.a \
$(top_builddir)/src/libs/zbxcommshigh/libzbxcommshigh.a \
$(top_builddir)/src/libs/zbxjson/libzbxjson.a \
$(top_builddir)/src/libs/zbxhttp/libzbxhttp.a \
diff --git a/src/zabbix_server/Makefile.am b/src/zabbix_server/Makefile.am
index 96e778f8b44..23d1430c2db 100644
--- a/src/zabbix_server/Makefile.am
+++ b/src/zabbix_server/Makefile.am
@@ -101,6 +101,7 @@ zabbix_server_LDADD = \
$(top_builddir)/src/libs/zbxcommon/libzbxcommon.a \
$(top_builddir)/src/libs/zbxcrypto/libzbxcrypto.a \
$(top_builddir)/src/libs/zbxcomms/libzbxcomms.a \
+ $(top_builddir)/src/libs/zbxcommon/libzbxcommon.a \
$(top_builddir)/src/libs/zbxcommshigh/libzbxcommshigh.a \
$(top_builddir)/src/libs/zbxjson/libzbxjson.a \
$(top_builddir)/src/libs/zbxhttp/libzbxhttp.a \
diff --git a/src/zabbix_server/ha/ha_manager.c b/src/zabbix_server/ha/ha_manager.c
index 544e38f1d9a..9a48f063735 100644
--- a/src/zabbix_server/ha/ha_manager.c
+++ b/src/zabbix_server/ha/ha_manager.c
@@ -1668,7 +1668,7 @@ int zbx_ha_stop(char **error)
zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);
- if (ZBX_THREAD_ERROR == ha_pid)
+ if (ZBX_THREAD_ERROR == ha_pid || 0 != kill(ha_pid, 0))
{
ret = SUCCEED;
goto out;
@@ -1688,7 +1688,8 @@ int zbx_ha_stop(char **error)
ret = SUCCEED;
}
out:
- ha_pid = ZBX_THREAD_ERROR;
+ if (SUCCEED == ret)
+ ha_pid = ZBX_THREAD_ERROR;
zabbix_log(LOG_LEVEL_DEBUG, "End of %s():%s", __func__, zbx_result_string(ret));
@@ -1702,9 +1703,12 @@ out:
******************************************************************************/
void zbx_ha_kill(void)
{
- kill(ha_pid, SIGKILL);
- zbx_thread_wait(ha_pid);
- ha_pid = ZBX_THREAD_ERROR;
+ if (ZBX_THREAD_ERROR != ha_pid)
+ {
+ kill(ha_pid, SIGKILL);
+ zbx_thread_wait(ha_pid);
+ ha_pid = ZBX_THREAD_ERROR;
+ }
}
/******************************************************************************
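The guard added to zbx_ha_kill() matters because passing -1 to kill() signals every process the caller is permitted to signal. A minimal sketch of the same guard pattern, assuming (as in the Zabbix thread definitions on POSIX) that an invalid pid is represented as -1; all names below are illustrative, not part of the patch.

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define THREAD_ERROR	(-1)	/* illustrative stand-in for ZBX_THREAD_ERROR */

static void kill_child(pid_t *pid)
{
	if (THREAD_ERROR != *pid)	/* never let kill(-1, ...) reach all processes */
	{
		kill(*pid, SIGKILL);
		waitpid(*pid, NULL, 0);	/* reap the child so it does not remain a zombie */
		*pid = THREAD_ERROR;
	}
}

int main(void)
{
	pid_t pid = fork();

	if (0 == pid)
	{
		pause();	/* child waits until it is killed */
		_exit(0);
	}

	kill_child(&pid);	/* terminates and reaps the child */
	kill_child(&pid);	/* second call is now a safe no-op */

	return 0;
}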
diff --git a/src/zabbix_server/poller/checks_simple_vmware.c b/src/zabbix_server/poller/checks_simple_vmware.c
index 90ab321c00b..3dabe736a9f 100644
--- a/src/zabbix_server/poller/checks_simple_vmware.c
+++ b/src/zabbix_server/poller/checks_simple_vmware.c
@@ -2545,12 +2545,6 @@ static int check_vcenter_datastore_latency(AGENT_REQUEST *request, const char *u
datastore = service->data->datastores.values[i];
}
- if (NULL == datastore->uuid)
- {
- SET_MSG_RESULT(result, zbx_strdup(NULL, "Unknown datastore uuid."));
- goto unlock;
- }
-
if (FAIL == zbx_vmware_service_get_counterid(service, perfcounter, &counterid, &unit))
{
SET_MSG_RESULT(result, zbx_strdup(NULL, "Performance counter is not available."));
diff --git a/src/zabbix_server/trapper/Makefile.am b/src/zabbix_server/trapper/Makefile.am
index adece376064..05947d08f9e 100644
--- a/src/zabbix_server/trapper/Makefile.am
+++ b/src/zabbix_server/trapper/Makefile.am
@@ -30,3 +30,6 @@ libzbxtrapper_server_a_SOURCES = \
libzbxtrapper_proxy_a_SOURCES = \
trapper_proxy.c \
trapper_request.h
+
+libzbxtrapper_a_CFLAGS = \
+ $(LIBXML2_CFLAGS)
diff --git a/src/zabbix_server/trapper/trapper.c b/src/zabbix_server/trapper/trapper.c
index 669365ba018..e52f7e22340 100644
--- a/src/zabbix_server/trapper/trapper.c
+++ b/src/zabbix_server/trapper/trapper.c
@@ -35,6 +35,7 @@
#include "trapper_expressions_evaluate.h"
#include "trapper_item_test.h"
#include "trapper_request.h"
+#include "base64.h"
#ifdef HAVE_NETSNMP
# include "zbxrtc.h"
@@ -949,6 +950,97 @@ static void active_passive_misconfig(zbx_socket_t *sock)
zbx_free(msg);
}
+static int comms_parse_response(char *xml, char *host, size_t host_len, char *key, size_t key_len,
+ char *data, size_t data_len, char *lastlogsize, size_t lastlogsize_len,
+ char *timestamp, size_t timestamp_len, char *source, size_t source_len,
+ char *severity, size_t severity_len)
+{
+ int i, ret = SUCCEED;
+ char *data_b64 = NULL;
+
+ assert(NULL != host && 0 != host_len);
+ assert(NULL != key && 0 != key_len);
+ assert(NULL != data && 0 != data_len);
+ assert(NULL != lastlogsize && 0 != lastlogsize_len);
+ assert(NULL != timestamp && 0 != timestamp_len);
+ assert(NULL != source && 0 != source_len);
+ assert(NULL != severity && 0 != severity_len);
+
+ if (SUCCEED == xml_get_data_dyn(xml, "host", &data_b64))
+ {
+ str_base64_decode(data_b64, host, (int)host_len - 1, &i);
+ host[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ {
+ *host = '\0';
+ ret = FAIL;
+ }
+
+ if (SUCCEED == xml_get_data_dyn(xml, "key", &data_b64))
+ {
+ str_base64_decode(data_b64, key, (int)key_len - 1, &i);
+ key[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ {
+ *key = '\0';
+ ret = FAIL;
+ }
+
+ if (SUCCEED == xml_get_data_dyn(xml, "data", &data_b64))
+ {
+ str_base64_decode(data_b64, data, (int)data_len - 1, &i);
+ data[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ {
+ *data = '\0';
+ ret = FAIL;
+ }
+
+ if (SUCCEED == xml_get_data_dyn(xml, "lastlogsize", &data_b64))
+ {
+ str_base64_decode(data_b64, lastlogsize, (int)lastlogsize_len - 1, &i);
+ lastlogsize[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ *lastlogsize = '\0';
+
+ if (SUCCEED == xml_get_data_dyn(xml, "timestamp", &data_b64))
+ {
+ str_base64_decode(data_b64, timestamp, (int)timestamp_len - 1, &i);
+ timestamp[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ *timestamp = '\0';
+
+ if (SUCCEED == xml_get_data_dyn(xml, "source", &data_b64))
+ {
+ str_base64_decode(data_b64, source, (int)source_len - 1, &i);
+ source[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ *source = '\0';
+
+ if (SUCCEED == xml_get_data_dyn(xml, "severity", &data_b64))
+ {
+ str_base64_decode(data_b64, severity, (int)severity_len - 1, &i);
+ severity[i] = '\0';
+ xml_free_data_dyn(&data_b64);
+ }
+ else
+ *severity = '\0';
+
+ return ret;
+}
+
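A hypothetical call site for the helper above, for orientation only (the buffer names and sizes are illustrative, and xml is assumed to be a NUL-terminated request body): SUCCEED is returned only when the mandatory host, key and data elements decode, while the remaining fields default to empty strings.

/* hypothetical usage sketch; not part of the patch */
char	host[128], key[256], data[1024], lastlogsize[32], timestamp[32], source[64], severity[8];

if (SUCCEED == comms_parse_response(xml, host, sizeof(host), key, sizeof(key), data, sizeof(data),
		lastlogsize, sizeof(lastlogsize), timestamp, sizeof(timestamp),
		source, sizeof(source), severity, sizeof(severity)))
{
	/* host, key and data were all present and base64-decoded */
}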
static int process_trap(zbx_socket_t *sock, char *s, ssize_t bytes_received, zbx_timespec_t *ts)
{
int ret = SUCCEED;
diff --git a/src/zabbix_server/vmware/vmware.c b/src/zabbix_server/vmware/vmware.c
index 042132c527b..703e39ce66d 100644
--- a/src/zabbix_server/vmware/vmware.c
+++ b/src/zabbix_server/vmware/vmware.c
@@ -231,10 +231,6 @@ static zbx_uint64_t evt_req_chunk_size;
#define ZBX_XPATH_COUNTERINFO() \
"/*/*/*/*/*/*[local-name()='propSet']/*[local-name()='val']/*[local-name()='PerfCounterInfo']"
-#define ZBX_XPATH_DATASTORE_MOUNT() \
- "/*/*/*/*/*/*[local-name()='propSet']/*/*[local-name()='DatastoreHostMount']" \
- "/*[local-name()='mountInfo']/*[local-name()='path']"
-
#define ZBX_XPATH_HV_DATASTORES() \
"/*/*/*/*/*/*[local-name()='propSet'][*[local-name()='name'][text()='datastore']]" \
"/*[local-name()='val']/*[@type='Datastore']"
@@ -971,9 +967,7 @@ static void vmware_datastore_shared_free(zbx_vmware_datastore_t *datastore)
{
vmware_shared_strfree(datastore->name);
vmware_shared_strfree(datastore->id);
-
- if (NULL != datastore->uuid)
- vmware_shared_strfree(datastore->uuid);
+ vmware_shared_strfree(datastore->uuid);
vmware_vector_str_uint64_pair_shared_clean(&datastore->hv_uuids_access);
zbx_vector_str_uint64_pair_destroy(&datastore->hv_uuids_access);
@@ -2983,7 +2977,6 @@ static zbx_vmware_datastore_t *vmware_service_create_datastore(const zbx_vmware_
"<ns0:propSet>" \
"<ns0:type>Datastore</ns0:type>" \
"<ns0:pathSet>summary</ns0:pathSet>" \
- "<ns0:pathSet>host</ns0:pathSet>" \
"<ns0:pathSet>info</ns0:pathSet>" \
"</ns0:propSet>" \
"<ns0:objectSet>" \
@@ -3021,7 +3014,7 @@ static zbx_vmware_datastore_t *vmware_service_create_datastore(const zbx_vmware_
name = zbx_xml_doc_read_value(doc, ZBX_XPATH_DATASTORE_SUMMARY("name"));
- if (NULL != (path = zbx_xml_doc_read_value(doc, ZBX_XPATH_DATASTORE_MOUNT())))
+ if (NULL != (path = zbx_xml_doc_read_value(doc, ZBX_XPATH_DATASTORE_SUMMARY("url"))))
{
if ('\0' != *path)
{
@@ -3040,6 +3033,12 @@ static zbx_vmware_datastore_t *vmware_service_create_datastore(const zbx_vmware_
}
zbx_free(path);
}
+ else
+ {
+ zabbix_log(LOG_LEVEL_DEBUG, "%s() datastore uuid not present for id:'%s'", __func__, id);
+ zbx_free(name);
+ goto out;
+ }
if (ZBX_VMWARE_TYPE_VSPHERE == service->type)
{
@@ -3835,13 +3834,6 @@ static int vmware_service_init_hv(zbx_vmware_service_t *service, CURL *easyhandl
hv_ds_access.value = vmware_hv_get_ds_access(details, ds->id);
zbx_vector_str_uint64_pair_append_ptr(&ds->hv_uuids_access, &hv_ds_access);
- if (NULL == ds->uuid)
- {
- zabbix_log(LOG_LEVEL_WARNING, "%s(): Datastore \"%s\" does not have uuid.", __func__,
- datastores.values[i]);
- continue;
- }
-
dsname = (zbx_vmware_dsname_t *)zbx_malloc(NULL, sizeof(zbx_vmware_dsname_t));
dsname->name = zbx_strdup(NULL, ds->name);
dsname->uuid = zbx_strdup(NULL, ds->uuid);
@@ -5329,7 +5321,7 @@ static int vmware_service_initialize(zbx_vmware_service_t *service, CURL *easyha
if (SUCCEED != vmware_service_get_contents(easyhandle, &version, &fullname, error))
goto out;
- if (0 == (service->state & ZBX_VMWARE_STATE_NEW) && 0 == strcmp(service->version, version))
+ if (0 != (service->state & ZBX_VMWARE_STATE_READY) && 0 == strcmp(service->version, version))
{
ret = SUCCEED;
goto out;
diff --git a/templates/app/iis_agent/README.md b/templates/app/iis_agent/README.md
index 7ddad601a14..5ceceb63257 100644
--- a/templates/app/iis_agent/README.md
+++ b/templates/app/iis_agent/README.md
@@ -59,8 +59,8 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|IIS |IIS: World Wide Web Publishing Service (W3SVC) state |<p>The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".</p> |ZABBIX_PASSIVE |service_state[W3SVC]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|IIS |IIS: Windows Process Activation Service (WAS) state |<p>Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.</p> |ZABBIX_PASSIVE |service_state[WAS]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|IIS |IIS: World Wide Web Publishing Service (W3SVC) state |<p>The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".</p> |ZABBIX_PASSIVE |service.info[W3SVC]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|IIS |IIS: Windows Process Activation Service (WAS) state |<p>Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.</p> |ZABBIX_PASSIVE |service.info[WAS]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|IIS |IIS: {$IIS.PORT} port ping |<p>-</p> |SIMPLE |net.tcp.service[{$IIS.SERVICE},,{$IIS.PORT}]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|IIS |IIS: Uptime |<p>Service uptime in seconds.</p> |ZABBIX_PASSIVE |perf_counter_en["\Web Service(_Total)\Service Uptime"] |
|IIS |IIS: Bytes Received per second |<p>The average rate per minute at which data bytes are received by the service at the Application Layer. Does not include protocol headers or control bytes.</p> |ZABBIX_PASSIVE |perf_counter_en["\Web Service(_Total)\Bytes Received/sec", 60] |
@@ -105,11 +105,11 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|IIS: The World Wide Web Publishing Service (W3SVC) is not running |<p>The World Wide Web Publishing Service (W3SVC) is not in running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent/service_state[W3SVC])<>0` |HIGH |<p>**Depends on**:</p><p>- IIS: Windows process Activation Service (WAS) is not the running</p> |
-|IIS: Windows process Activation Service (WAS) is not the running |<p>Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent/service_state[WAS])<>0` |HIGH | |
+|IIS: The World Wide Web Publishing Service (W3SVC) is not running |<p>The World Wide Web Publishing Service (W3SVC) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent/service.info[W3SVC])<>0` |HIGH |<p>**Depends on**:</p><p>- IIS: Windows process Activation Service (WAS) is not running</p> |
+|IIS: Windows process Activation Service (WAS) is not running |<p>Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent/service.info[WAS])<>0` |HIGH | |
|IIS: Port {$IIS.PORT} is down |<p>-</p> |`last(/IIS by Zabbix agent/net.tcp.service[{$IIS.SERVICE},,{$IIS.PORT}])=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- IIS: The World Wide Web Publishing Service (W3SVC) is not running</p> |
-|IIS: has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/IIS by Zabbix agent/perf_counter_en["\Web Service(_Total)\Service Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
-|IIS: {#APPPOOL} has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/IIS by Zabbix agent/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|IIS: has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/IIS by Zabbix agent/perf_counter_en["\Web Service(_Total)\Service Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|IIS: {#APPPOOL} has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/IIS by Zabbix agent/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
|IIS: Application pool {#APPPOOL} is not in Running state |<p>-</p> |`last(/IIS by Zabbix agent/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool State"])<>3 and {$IIS.APPPOOL.MONITORED:"{#APPPOOL}"}=1` |HIGH |<p>**Depends on**:</p><p>- IIS: The World Wide Web Publishing Service (W3SVC) is not running</p> |
|IIS: Application pool {#APPPOOL} has been recycled |<p>-</p> |`last(/IIS by Zabbix agent/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Total Application Pool Recycles"],#1)<>last(/IIS by Zabbix agent/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Total Application Pool Recycles"],#2) and {$IIS.APPPOOL.MONITORED:"{#APPPOOL}"}=1` |INFO | |
|IIS: Request queue of {#APPPOOL} is too large |<p>-</p> |`min(/IIS by Zabbix agent/perf_counter_en["\HTTP Service Request Queues({#APPPOOL})\CurrentQueueSize"],{$IIS.QUEUE.MAX.TIME})>{$IIS.QUEUE.MAX.WARN}` |WARNING |<p>**Depends on**:</p><p>- IIS: Application pool {#APPPOOL} is not in Running state</p> |
diff --git a/templates/app/iis_agent/template_app_iis_agent.yaml b/templates/app/iis_agent/template_app_iis_agent.yaml
index 61b7542a89c..e264bfbb7bb 100644
--- a/templates/app/iis_agent/template_app_iis_agent.yaml
+++ b/templates/app/iis_agent/template_app_iis_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:27:43Z'
+ date: '2022-06-14T13:45:14Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -51,7 +51,7 @@ zabbix_export:
dependencies:
-
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
- expression: 'last(/IIS by Zabbix agent/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent/service.info[W3SVC])<>0'
tags:
-
tag: scope
@@ -448,7 +448,7 @@ zabbix_export:
name: 'IIS: has been restarted'
event_name: 'IIS: has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
@@ -585,7 +585,7 @@ zabbix_export:
-
uuid: 7d76ec3d35f6446c842595c4831fabf4
name: 'IIS: World Wide Web Publishing Service (W3SVC) state'
- key: 'service_state[W3SVC]'
+ key: 'service.info[W3SVC]'
history: 7d
description: 'The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".'
valuemap:
@@ -605,14 +605,14 @@ zabbix_export:
triggers:
-
uuid: cac6bbb13ce84772a976275da0805179
- expression: 'last(/IIS by Zabbix agent/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent/service.info[W3SVC])<>0'
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
priority: HIGH
- description: 'The World Wide Web Publishing Service (W3SVC) is not in running state. IIS cannot start.'
+ description: 'The World Wide Web Publishing Service (W3SVC) is not in the running state. IIS cannot start.'
dependencies:
-
- name: 'IIS: Windows process Activation Service (WAS) is not the running'
- expression: 'last(/IIS by Zabbix agent/service_state[WAS])<>0'
+ name: 'IIS: Windows process Activation Service (WAS) is not running'
+ expression: 'last(/IIS by Zabbix agent/service.info[WAS])<>0'
tags:
-
tag: scope
@@ -620,7 +620,7 @@ zabbix_export:
-
uuid: 38858eb3cc2f4580a0a3c56c11fdeba5
name: 'IIS: Windows Process Activation Service (WAS) state'
- key: 'service_state[WAS]'
+ key: 'service.info[WAS]'
history: 7d
description: 'Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.'
valuemap:
@@ -640,8 +640,8 @@ zabbix_export:
triggers:
-
uuid: 98fc8e403ac0405e84bf533336c4102c
- expression: 'last(/IIS by Zabbix agent/service_state[WAS])<>0'
- name: 'IIS: Windows process Activation Service (WAS) is not the running'
+ expression: 'last(/IIS by Zabbix agent/service.info[WAS])<>0'
+ name: 'IIS: Windows process Activation Service (WAS) is not running'
priority: HIGH
description: 'Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.'
tags:
@@ -696,7 +696,7 @@ zabbix_export:
dependencies:
-
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
- expression: 'last(/IIS by Zabbix agent/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent/service.info[W3SVC])<>0'
tags:
-
tag: scope
@@ -723,7 +723,7 @@ zabbix_export:
name: 'IIS: {#APPPOOL} has been restarted'
event_name: 'IIS: {#APPPOOL} has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
diff --git a/templates/app/iis_agent_active/README.md b/templates/app/iis_agent_active/README.md
index 68f80e3d7c5..a25619c0fa1 100644
--- a/templates/app/iis_agent_active/README.md
+++ b/templates/app/iis_agent_active/README.md
@@ -59,8 +59,8 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|IIS |IIS: World Wide Web Publishing Service (W3SVC) state |<p>The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".</p> |ZABBIX_ACTIVE |service_state[W3SVC]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
-|IIS |IIS: Windows Process Activation Service (WAS) state |<p>Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.</p> |ZABBIX_ACTIVE |service_state[WAS]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|IIS |IIS: World Wide Web Publishing Service (W3SVC) state |<p>The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".</p> |ZABBIX_ACTIVE |service.info[W3SVC]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|IIS |IIS: Windows Process Activation Service (WAS) state |<p>Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.</p> |ZABBIX_ACTIVE |service.info[WAS]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|IIS |IIS: {$IIS.PORT} port ping |<p>-</p> |SIMPLE |net.tcp.service[{$IIS.SERVICE},,{$IIS.PORT}]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
|IIS |IIS: Uptime |<p>Service uptime in seconds.</p> |ZABBIX_ACTIVE |perf_counter_en["\Web Service(_Total)\Service Uptime"] |
|IIS |IIS: Bytes Received per second |<p>The average rate per minute at which data bytes are received by the service at the Application Layer. Does not include protocol headers or control bytes.</p> |ZABBIX_ACTIVE |perf_counter_en["\Web Service(_Total)\Bytes Received/sec", 60] |
@@ -105,11 +105,11 @@ There are no template links in this template.
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|IIS: The World Wide Web Publishing Service (W3SVC) is not running |<p>The World Wide Web Publishing Service (W3SVC) is not in running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent active/service_state[W3SVC])<>0` |HIGH |<p>**Depends on**:</p><p>- IIS: Windows process Activation Service (WAS) is not the running</p> |
-|IIS: Windows process Activation Service (WAS) is not the running |<p>Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent active/service_state[WAS])<>0` |HIGH | |
+|IIS: The World Wide Web Publishing Service (W3SVC) is not running |<p>The World Wide Web Publishing Service (W3SVC) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent active/service.info[W3SVC])<>0` |HIGH |<p>**Depends on**:</p><p>- IIS: Windows process Activation Service (WAS) is not running</p> |
+|IIS: Windows process Activation Service (WAS) is not running |<p>Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.</p> |`last(/IIS by Zabbix agent active/service.info[WAS])<>0` |HIGH | |
|IIS: Port {$IIS.PORT} is down |<p>-</p> |`last(/IIS by Zabbix agent active/net.tcp.service[{$IIS.SERVICE},,{$IIS.PORT}])=0` |AVERAGE |<p>Manual close: YES</p><p>**Depends on**:</p><p>- IIS: The World Wide Web Publishing Service (W3SVC) is not running</p> |
-|IIS: has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\Web Service(_Total)\Service Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
-|IIS: {#APPPOOL} has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|IIS: has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\Web Service(_Total)\Service Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
+|IIS: {#APPPOOL} has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool Uptime"])<10m` |INFO |<p>Manual close: YES</p> |
|IIS: Application pool {#APPPOOL} is not in Running state |<p>-</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Current Application Pool State"])<>3 and {$IIS.APPPOOL.MONITORED:"{#APPPOOL}"}=1` |HIGH |<p>**Depends on**:</p><p>- IIS: The World Wide Web Publishing Service (W3SVC) is not running</p> |
|IIS: Application pool {#APPPOOL} has been recycled |<p>-</p> |`last(/IIS by Zabbix agent active/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Total Application Pool Recycles"],#1)<>last(/IIS by Zabbix agent active/perf_counter_en["\APP_POOL_WAS({#APPPOOL})\Total Application Pool Recycles"],#2) and {$IIS.APPPOOL.MONITORED:"{#APPPOOL}"}=1` |INFO | |
|IIS: Request queue of {#APPPOOL} is too large |<p>-</p> |`min(/IIS by Zabbix agent active/perf_counter_en["\HTTP Service Request Queues({#APPPOOL})\CurrentQueueSize"],{$IIS.QUEUE.MAX.TIME})>{$IIS.QUEUE.MAX.WARN}` |WARNING |<p>**Depends on**:</p><p>- IIS: Application pool {#APPPOOL} is not in Running state</p> |
diff --git a/templates/app/iis_agent_active/template_app_iis_agent_active.yaml b/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
index 3b5d091d779..c7f5163fbc8 100644
--- a/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
+++ b/templates/app/iis_agent_active/template_app_iis_agent_active.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:27:44Z'
+ date: '2022-06-14T13:45:22Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -51,7 +51,7 @@ zabbix_export:
dependencies:
-
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
- expression: 'last(/IIS by Zabbix agent active/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent active/service.info[W3SVC])<>0'
tags:
-
tag: scope
@@ -474,7 +474,7 @@ zabbix_export:
name: 'IIS: has been restarted'
event_name: 'IIS: has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
@@ -620,7 +620,7 @@ zabbix_export:
uuid: 31d4f21ce4c6412b912ca65fe9aee83c
name: 'IIS: World Wide Web Publishing Service (W3SVC) state'
type: ZABBIX_ACTIVE
- key: 'service_state[W3SVC]'
+ key: 'service.info[W3SVC]'
history: 7d
description: 'The World Wide Web Publishing Service (W3SVC) provides web connectivity and administration of websites through the IIS snap-in. If the World Wide Web Publishing Service stops, the operating system cannot serve any form of web request. This service was dependent on "Windows Process Activation Service".'
valuemap:
@@ -640,14 +640,14 @@ zabbix_export:
triggers:
-
uuid: ca139b8b27c44f7fb22938fd3419cff5
- expression: 'last(/IIS by Zabbix agent active/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent active/service.info[W3SVC])<>0'
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
priority: HIGH
- description: 'The World Wide Web Publishing Service (W3SVC) is not in running state. IIS cannot start.'
+ description: 'The World Wide Web Publishing Service (W3SVC) is not in the running state. IIS cannot start.'
dependencies:
-
- name: 'IIS: Windows process Activation Service (WAS) is not the running'
- expression: 'last(/IIS by Zabbix agent active/service_state[WAS])<>0'
+ name: 'IIS: Windows process Activation Service (WAS) is not running'
+ expression: 'last(/IIS by Zabbix agent active/service.info[WAS])<>0'
tags:
-
tag: scope
@@ -656,7 +656,7 @@ zabbix_export:
uuid: 7f016ce3ab3941b78741dfb72c6d7693
name: 'IIS: Windows Process Activation Service (WAS) state'
type: ZABBIX_ACTIVE
- key: 'service_state[WAS]'
+ key: 'service.info[WAS]'
history: 7d
description: 'Windows Process Activation Service (WAS) is a tool for managing worker processes that contain applications that host Windows Communication Foundation (WCF) services. Worker processes handle requests that are sent to a Web Server for specific application pools. Each application pool sets boundaries for the applications it contains.'
valuemap:
@@ -676,8 +676,8 @@ zabbix_export:
triggers:
-
uuid: d7a16291f196424ca9c39af4271ab78f
- expression: 'last(/IIS by Zabbix agent active/service_state[WAS])<>0'
- name: 'IIS: Windows process Activation Service (WAS) is not the running'
+ expression: 'last(/IIS by Zabbix agent active/service.info[WAS])<>0'
+ name: 'IIS: Windows process Activation Service (WAS) is not running'
priority: HIGH
description: 'Windows Process Activation Service (WAS) is not in the running state. IIS cannot start.'
tags:
@@ -734,7 +734,7 @@ zabbix_export:
dependencies:
-
name: 'IIS: The World Wide Web Publishing Service (W3SVC) is not running'
- expression: 'last(/IIS by Zabbix agent active/service_state[W3SVC])<>0'
+ expression: 'last(/IIS by Zabbix agent active/service.info[W3SVC])<>0'
tags:
-
tag: scope
@@ -762,7 +762,7 @@ zabbix_export:
name: 'IIS: {#APPPOOL} has been restarted'
event_name: 'IIS: {#APPPOOL} has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
diff --git a/templates/app/ntp_service/README.md b/templates/app/ntp_service/README.md
index c93f796d847..11560551549 100644
--- a/templates/app/ntp_service/README.md
+++ b/templates/app/ntp_service/README.md
@@ -25,13 +25,13 @@ There are no template links in this template.
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|Services |NTP service is running |<p>-</p> |SIMPLE |net.tcp.service[ntp] |
+|Services |NTP service is running |<p>-</p> |SIMPLE |net.udp.service[ntp] |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|NTP service is down on {HOST.NAME} |<p>-</p> |`max(/NTP Service/net.tcp.service[ntp],#3)=0` |AVERAGE | |
+|NTP service is down on {HOST.NAME} |<p>-</p> |`max(/NTP Service/net.udp.service[ntp],#3)=0` |AVERAGE | |
## Feedback
diff --git a/templates/app/ntp_service/template_app_ntp_service.yaml b/templates/app/ntp_service/template_app_ntp_service.yaml
index bfbc0c3d0f9..6a0f69b9cb9 100644
--- a/templates/app/ntp_service/template_app_ntp_service.yaml
+++ b/templates/app/ntp_service/template_app_ntp_service.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:33:43Z'
+ date: '2022-06-07T09:59:15Z'
groups:
-
uuid: 57b7ae836ca64446ba2c296389c009b7
@@ -22,7 +22,7 @@ zabbix_export:
uuid: e6101cf9015e436e82d9203f638f1840
name: 'NTP service is running'
type: SIMPLE
- key: 'net.tcp.service[ntp]'
+ key: 'net.udp.service[ntp]'
history: 1w
valuemap:
name: 'Service state'
@@ -33,7 +33,7 @@ zabbix_export:
triggers:
-
uuid: 6c7f4d7e2719401d8fd8f99ae8fc2d34
- expression: 'max(/NTP Service/net.tcp.service[ntp],#3)=0'
+ expression: 'max(/NTP Service/net.udp.service[ntp],#3)=0'
name: 'NTP service is down on {HOST.NAME}'
priority: AVERAGE
tags:
diff --git a/templates/app/pfsense_snmp/README.md b/templates/app/pfsense_snmp/README.md
index 2af5ad058ff..348d24f7359 100644
--- a/templates/app/pfsense_snmp/README.md
+++ b/templates/app/pfsense_snmp/README.md
@@ -28,15 +28,9 @@ No specific Zabbix configuration is required.
|Name|Description|Default|
|----|-----------|-------|
-|{$CPU.UTIL.CRIT} |<p>Threshold of CPU utilization for warning trigger in %.</p> |`90` |
-|{$ICMP_LOSS_WARN} |<p>Threshold of ICMP packets loss for warning trigger in %.</p> |`20` |
-|{$ICMP_RESPONSE_TIME_WARN} |<p>Threshold of average ICMP response time for warning trigger in seconds.</p> |`0.15` |
|{$IF.ERRORS.WARN} |<p>Threshold of error packets rate for warning trigger. Can be used with interface name as context.</p> |`2` |
|{$IF.UTIL.MAX} |<p>Threshold of interface bandwidth utilization for warning trigger in %. Can be used with interface name as context.</p> |`90` |
|{$IFCONTROL} |<p>Macro for operational state of the interface for link down trigger. Can be used with interface name as context.</p> |`1` |
-|{$LOAD_AVG_PER_CPU.MAX.WARN} |<p>Load per CPU considered sustainable. Tune if needed.</p> |`1.5` |
-|{$MEMORY.AVAILABLE.MIN} |<p>Threshold of available memory for trigger in bytes.</p> |`20M` |
-|{$MEMORY.UTIL.MAX} |<p>Threshold of memory utilization for trigger in %</p> |`90` |
|{$NET.IF.IFADMINSTATUS.MATCHES} |<p>This macro is used in filters of network interfaces discovery rule.</p> |`^.*` |
|{$NET.IF.IFADMINSTATUS.NOT_MATCHES} |<p>Ignore down(2) administrative status</p> |`^2$` |
|{$NET.IF.IFALIAS.MATCHES} |<p>This macro is used in filters of network interfaces discovery rule.</p> |`.*` |
@@ -52,19 +46,6 @@ No specific Zabbix configuration is required.
|{$SNMP.TIMEOUT} |<p>The time interval for SNMP availability trigger.</p> |`5m` |
|{$SOURCE.TRACKING.TABLE.UTIL.MAX} |<p>Threshold of source tracking table utilization trigger in %.</p> |`90` |
|{$STATE.TABLE.UTIL.MAX} |<p>Threshold of state table utilization trigger in %.</p> |`90` |
-|{$SWAP.PFREE.MIN.WARN} |<p>Threshold of free swap space for warning trigger in %.</p> |`50` |
-|{$VFS.DEV.DEVNAME.MATCHES} |<p>This macro is used in block devices discovery. Can be overridden on the host or linked template level</p> |`.+` |
-|{$VFS.DEV.DEVNAME.NOT_MATCHES} |<p>This macro is used in block devices discovery. Can be overridden on the host or linked template level</p> |`^(loop[0-9]*|sd[a-z][0-9]+|nbd[0-9]+|sr[0-9]+|fd[0-9]+|dm-[0-9]+|ram[0-9]+|ploop[a-z0-9]+|md[0-9]*|hcp[0-9]*|cd[0-9]*|pass[0-9]*|zram[0-9]*)` |
-|{$VFS.FS.FREE.MIN.CRIT} |<p>The critical threshold of the filesystem utilization.</p> |`5G` |
-|{$VFS.FS.FREE.MIN.WARN} |<p>The warning threshold of the filesystem utilization.</p> |`10G` |
-|{$VFS.FS.FSNAME.MATCHES} |<p>This macro is used in filesystems discovery. Can be overridden on the host or linked template level</p> |`.+` |
-|{$VFS.FS.FSNAME.NOT_MATCHES} |<p>This macro is used in filesystems discovery. Can be overridden on the host or linked template level</p> |`^(/dev|/sys|/run|/var/run|/proc|.+/shm$)` |
-|{$VFS.FS.FSTYPE.MATCHES} |<p>This macro is used in filesystems discovery. Can be overridden on the host or linked template level</p> |`.*(9.3|hrFSBerkeleyFFS)$` |
-|{$VFS.FS.FSTYPE.NOT_MATCHES} |<p>This macro is used in filesystems discovery. Can be overridden on the host or linked template level</p> |`^\s$` |
-|{$VFS.FS.INODE.PFREE.MIN.CRIT} |<p>Threshold of inodes usage for average severity trigger in %. Can be used with filesystem name as context.</p> |`10` |
-|{$VFS.FS.INODE.PFREE.MIN.WARN} |<p>Threshold of inodes usage for warning trigger in %. Can be used with filesystem name as context.</p> |`20` |
-|{$VFS.FS.PUSED.MAX.CRIT} |<p>Threshold of filesystem used space for average severity trigger in %. Can be used with filesystem name as context.</p> |`90` |
-|{$VFS.FS.PUSED.MAX.WARN} |<p>Threshold of used filesystem space for warning trigger in %. Can be used with filesystem name as context.</p> |`80` |
## Template links
@@ -74,42 +55,12 @@ There are no template links in this template.
|Name|Description|Type|Key and additional info|
|----|-----------|----|----|
-|Block devices discovery |<p>Block devices are discovered from UCD-DISKIO-MIB::diskIOTable (http://net-snmp.sourceforge.net/docs/mibs/ucdDiskIOMIB.html#diskIOTable).</p> |SNMP |vfs.dev.discovery<p>**Filter**:</p>AND <p>- {#DEVNAME} MATCHES_REGEX `{$VFS.DEV.DEVNAME.MATCHES}`</p><p>- {#DEVNAME} NOT_MATCHES_REGEX `{$VFS.DEV.DEVNAME.NOT_MATCHES}`</p> |
-|CPU discovery |<p>This discovery will create set of per core CPU metrics from UCD-SNMP-MIB, using {#CPU.COUNT} in preprocessing. That's the only reason why LLD is used.</p> |DEPENDENT |cpu.discovery<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
-|Mounted filesystem discovery |<p>HOST-RESOURCES-MIB::hrFS discovery with filter.</p> |SNMP |vfs.fs.discovery<p>**Filter**:</p>AND <p>- {#FSTYPE} MATCHES_REGEX `{$VFS.FS.FSTYPE.MATCHES}`</p><p>- {#FSTYPE} NOT_MATCHES_REGEX `{$VFS.FS.FSTYPE.NOT_MATCHES}`</p><p>- {#FSNAME} MATCHES_REGEX `{$VFS.FS.FSNAME.MATCHES}`</p><p>- {#FSNAME} NOT_MATCHES_REGEX `{$VFS.FS.FSNAME.NOT_MATCHES}`</p> |
-|Network interfaces discovery |<p>Discovering interfaces from IF-MIB.</p> |SNMP |net.if.discovery<p>**Filter**:</p>AND <p>- {#IFADMINSTATUS} MATCHES_REGEX `{$NET.IF.IFADMINSTATUS.MATCHES}`</p><p>- {#IFADMINSTATUS} NOT_MATCHES_REGEX `{$NET.IF.IFADMINSTATUS.NOT_MATCHES}`</p><p>- {#IFOPERSTATUS} MATCHES_REGEX `{$NET.IF.IFOPERSTATUS.MATCHES}`</p><p>- {#IFOPERSTATUS} NOT_MATCHES_REGEX `{$NET.IF.IFOPERSTATUS.NOT_MATCHES}`</p><p>- {#IFNAME} MATCHES_REGEX `{$NET.IF.IFNAME.MATCHES}`</p><p>- {#IFNAME} NOT_MATCHES_REGEX `{$NET.IF.IFNAME.NOT_MATCHES}`</p><p>- {#IFDESCR} MATCHES_REGEX `{$NET.IF.IFDESCR.MATCHES}`</p><p>- {#IFDESCR} NOT_MATCHES_REGEX `{$NET.IF.IFDESCR.NOT_MATCHES}`</p><p>- {#IFALIAS} MATCHES_REGEX `{$NET.IF.IFALIAS.MATCHES}`</p><p>- {#IFALIAS} NOT_MATCHES_REGEX `{$NET.IF.IFALIAS.NOT_MATCHES}`</p><p>- {#IFTYPE} MATCHES_REGEX `{$NET.IF.IFTYPE.MATCHES}`</p><p>- {#IFTYPE} NOT_MATCHES_REGEX `{$NET.IF.IFTYPE.NOT_MATCHES}`</p> |
+|Network interfaces discovery |<p>Discovering interfaces from IF-MIB.</p> |SNMP |pfsense.net.if.discovery<p>**Filter**:</p>AND <p>- {#IFADMINSTATUS} MATCHES_REGEX `{$NET.IF.IFADMINSTATUS.MATCHES}`</p><p>- {#IFADMINSTATUS} NOT_MATCHES_REGEX `{$NET.IF.IFADMINSTATUS.NOT_MATCHES}`</p><p>- {#IFOPERSTATUS} MATCHES_REGEX `{$NET.IF.IFOPERSTATUS.MATCHES}`</p><p>- {#IFOPERSTATUS} NOT_MATCHES_REGEX `{$NET.IF.IFOPERSTATUS.NOT_MATCHES}`</p><p>- {#IFNAME} MATCHES_REGEX `{$NET.IF.IFNAME.MATCHES}`</p><p>- {#IFNAME} NOT_MATCHES_REGEX `{$NET.IF.IFNAME.NOT_MATCHES}`</p><p>- {#IFDESCR} MATCHES_REGEX `{$NET.IF.IFDESCR.MATCHES}`</p><p>- {#IFDESCR} NOT_MATCHES_REGEX `{$NET.IF.IFDESCR.NOT_MATCHES}`</p><p>- {#IFALIAS} MATCHES_REGEX `{$NET.IF.IFALIAS.MATCHES}`</p><p>- {#IFALIAS} NOT_MATCHES_REGEX `{$NET.IF.IFALIAS.NOT_MATCHES}`</p><p>- {#IFTYPE} MATCHES_REGEX `{$NET.IF.IFTYPE.MATCHES}`</p><p>- {#IFTYPE} NOT_MATCHES_REGEX `{$NET.IF.IFTYPE.NOT_MATCHES}`</p> |
## Items collected
|Group|Name|Description|Type|Key and additional info|
|-----|----|-----------|----|---------------------|
-|CPU |PFSense: Interrupts per second |<p>MIB: UCD-SNMP-MIB</p><p>Number of interrupts processed.</p> |SNMP |system.cpu.intr<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
-|CPU |PFSense: Context switches per second |<p>MIB: UCD-SNMP-MIB</p><p>Number of context switches.</p> |SNMP |system.cpu.switches<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
-|CPU |PFSense: Load average (1m avg) |<p>MIB: UCD-SNMP-MIB</p><p>The 1 minute load averages.</p> |SNMP |system.cpu.load.avg1 |
-|CPU |PFSense: Load average (5m avg) |<p>MIB: UCD-SNMP-MIB</p><p>The 5 minutes load averages.</p> |SNMP |system.cpu.load.avg5 |
-|CPU |PFSense: Load average (15m avg) |<p>MIB: UCD-SNMP-MIB</p><p>The 15 minutes load averages.</p> |SNMP |system.cpu.load.avg15 |
-|CPU |PFSense: Number of CPUs |<p>MIB: HOST-RESOURCES-MIB</p><p>Count the number of CPU cores by counting number of cores discovered in hrProcessorTable using LLD.</p> |SNMP |system.cpu.num<p>**Preprocessing**:</p><p>- JAVASCRIPT: `//count the number of cores return JSON.parse(value).length; `</p> |
-|CPU |PFSense: CPU idle time |<p>MIB: UCD-SNMP-MIB</p><p>The time the CPU has spent doing nothing.</p> |SNMP |system.cpu.idle[{#SNMPINDEX}] |
-|CPU |PFSense: CPU system time |<p>MIB: UCD-SNMP-MIB</p><p>The time the CPU has spent running the kernel and its processes.</p> |SNMP |system.cpu.system[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p><p>- JAVASCRIPT: `//to get utilization in %, divide by N, where N is number of cores. return value/{#CPU.COUNT} `</p> |
-|CPU |PFSense: CPU user time |<p>MIB: UCD-SNMP-MIB</p><p>The time the CPU has spent running users' processes that are not niced.</p> |SNMP |system.cpu.user[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p><p>- JAVASCRIPT: `//to get utilization in %, divide by N, where N is number of cores. return value/{#CPU.COUNT} `</p> |
-|CPU |PFSense: CPU nice time |<p>MIB: UCD-SNMP-MIB</p><p>The time the CPU has spent running users' processes that have been niced.</p> |SNMP |system.cpu.nice[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p><p>- JAVASCRIPT: `//to get utilization in %, divide by N, where N is number of cores. return value/{#CPU.COUNT} `</p> |
-|CPU |PFSense: CPU iowait time |<p>MIB: UCD-SNMP-MIB</p><p>Amount of time the CPU has been waiting for I/O to complete.</p> |SNMP |system.cpu.iowait[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p><p>- JAVASCRIPT: `//to get utilization in %, divide by N, where N is number of cores. return value/{#CPU.COUNT} `</p> |
-|CPU |PFSense: CPU interrupt time |<p>MIB: UCD-SNMP-MIB</p><p>The amount of time the CPU has been servicing hardware interrupts.</p> |SNMP |system.cpu.interrupt[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p><p>- JAVASCRIPT: `//to get utilization in %, divide by N, where N is number of cores. return value/{#CPU.COUNT} `</p> |
-|CPU |PFSense: CPU utilization |<p>CPU utilization in %.</p> |DEPENDENT |system.cpu.util[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `//Calculate utilization return (100 - value) `</p> |
-|General |PFSense: System contact details |<p>MIB: SNMPv2-MIB</p><p>The textual identification of the contact person for this managed node, together with information on how to contact this person. If no contact information is known, the value is the zero-length string.</p> |SNMP |system.contact<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-|General |PFSense: System description |<p>MIB: SNMPv2-MIB</p><p>System description of the host.</p> |SNMP |system.descr<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-|General |PFSense: System location |<p>MIB: SNMPv2-MIB</p><p>The physical location of this node. If the location is unknown, the value is the zero-length string.</p> |SNMP |system.location<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-|General |PFSense: System name |<p>MIB: SNMPv2-MIB</p><p>System host name.</p> |SNMP |system.name<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-|General |PFSense: System object ID |<p>MIB: SNMPv2-MIB</p><p>The vendor authoritative identification of the network management subsystem contained in the entity. This value is allocated within the SMI enterprises subtree (1.3.6.1.4.1) and provides an easy and unambiguous means for determining what kind of box is being managed.</p> |SNMP |system.objectid<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
-|Memory |PFSense: Free memory |<p>MIB: UCD-SNMP-MIB</p><p>The amount of real/physical memory currently unused or available.</p> |SNMP |vm.memory.free<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Memory (buffers) |<p>MIB: UCD-SNMP-MIB</p><p>The total amount of real or virtual memory currently allocated for use as memory buffers.</p> |SNMP |vm.memory.buffers<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Memory (cached) |<p>MIB: UCD-SNMP-MIB</p><p>The total amount of real or virtual memory currently allocated for use as cached memory.</p> |SNMP |vm.memory.cached<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Total memory |<p>MIB: UCD-SNMP-MIB</p><p>Total memory in Bytes.</p> |SNMP |vm.memory.total<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Available memory |<p>Please note that memory utilization is a rough estimate, since memory available is calculated as free+buffers+cached, which is not 100% accurate, but the best we can get using SNMP.</p> |CALCULATED |vm.memory.available<p>**Expression**:</p>`last(//vm.memory.free)+last(//vm.memory.buffers)+last(//vm.memory.cached)` |
-|Memory |PFSense: Memory utilization |<p>Please note that memory utilization is a rough estimate, since memory available is calculated as free+buffers+cached, which is not 100% accurate, but the best we can get using SNMP.</p> |CALCULATED |vm.memory.util<p>**Expression**:</p>`(last(//vm.memory.total)-(last(//vm.memory.free)+last(//vm.memory.buffers)+last(//vm.memory.cached)))/last(//vm.memory.total)*100` |
-|Memory |PFSense: Total swap space |<p>MIB: UCD-SNMP-MIB</p><p>The total amount of swap space configured for this host.</p> |SNMP |system.swap.total<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Free swap space |<p>MIB: UCD-SNMP-MIB</p><p>The amount of swap space currently unused or available.</p> |SNMP |system.swap.free<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Memory |PFSense: Free swap space in % |<p>The free space of swap volume/file in percent.</p> |CALCULATED |system.swap.pfree<p>**Expression**:</p>`last(//system.swap.free)/last(//system.swap.total)*100` |
|Network interfaces |PFSense: Interface [{#IFNAME}({#IFALIAS})]: Inbound packets discarded |<p>MIB: IF-MIB</p><p>The number of inbound packets which were chosen to be discarded</p><p>even though no errors had been detected to prevent their being deliverable to a higher-layer protocol.</p><p>One possible reason for discarding such a packet could be to free up buffer space.</p><p>Discontinuities in the value of this counter can occur at re-initialization of the management system,</p><p>and at other times as indicated by the value of ifCounterDiscontinuityTime.</p> |SNMP |net.if.in.discards[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND: ``</p> |
|Network interfaces |PFSense: Interface [{#IFNAME}({#IFALIAS})]: Inbound packets with errors |<p>MIB: IF-MIB</p><p>For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being deliverable to a higher-layer protocol. For character-oriented or fixed-length interfaces, the number of inbound transmission units that contained errors preventing them from being deliverable to a higher-layer protocol. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime.</p> |SNMP |net.if.in.errors[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND: ``</p> |
|Network interfaces |PFSense: Interface [{#IFNAME}({#IFALIAS})]: Bits received |<p>MIB: IF-MIB</p><p>The total number of octets received on the interface, including framing characters. This object is a 64-bit version of ifInOctets. Discontinuities in the value of this counter can occur at re-initialization of the management system, and at other times as indicated by the value of ifCounterDiscontinuityTime.</p> |SNMP |net.if.in[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND: ``</p><p>- MULTIPLIER: `8`</p> |
@@ -153,29 +104,12 @@ There are no template links in this template.
|pfSense |PFSense: Normalized packets |<p>MIB: BEGEMOT-PF-MIB</p><p>True if the packet was logged with the specified packet filter reason code. The known codes are: match, bad-offset, fragment, short, normalize, and memory.</p> |SNMP |pfsense.packets.normalize<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|pfSense |PFSense: Packets dropped due to memory limitation |<p>MIB: BEGEMOT-PF-MIB</p><p>True if the packet was logged with the specified packet filter reason code. The known codes are: match, bad-offset, fragment, short, normalize, and memory.</p> |SNMP |pfsense.packets.mem.drop<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
|pfSense |PFSense: Firewall rules count |<p>MIB: BEGEMOT-PF-MIB</p><p>The number of labeled filter rules on this system.</p> |SNMP |pfsense.rules.count |
-|Status |PFSense: ICMP ping |<p>Host accessibility by ICMP.</p><p>0 - ICMP ping fails.</p><p>1 - ICMP ping successful.</p> |SIMPLE |icmpping |
-|Status |PFSense: ICMP loss |<p>Percentage of lost packets.</p> |SIMPLE |icmppingloss |
-|Status |PFSense: ICMP response time |<p>ICMP ping response time (in seconds).</p> |SIMPLE |icmppingsec |
-|Status |PFSense: Uptime |<p>MIB: SNMPv2-MIB</p><p>System uptime in 'N days, hh:mm:ss' format.</p> |SNMP |system.uptime<p>**Preprocessing**:</p><p>- MULTIPLIER: `0.01`</p> |
|Status |PFSense: SNMP agent availability |<p>Availability of SNMP checks on the host. The value of this item corresponds to availability icons in the host list.</p><p>Possible value:</p><p>0 - not available</p><p>1 - available</p><p>2 - unknown</p> |INTERNAL |zabbix[host,snmp,available] |
-|Storage |PFSense: [{#FSNAME}]: Used space |<p>MIB: UCD-SNMP-MIB</p><p>If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.</p> |SNMP |vfs.fs.used[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Storage |PFSense: [{#FSNAME}]: Total space |<p>MIB: UCD-SNMP-MIB</p><p>If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.</p> |SNMP |vfs.fs.total[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- MULTIPLIER: `1024`</p> |
-|Storage |PFSense: [{#FSNAME}]: Space utilization |<p>MIB: UCD-SNMP-MIB</p><p>If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.</p> |SNMP |vfs.fs.pused[{#SNMPINDEX}] |
-|Storage |PFSense: [{#FSNAME}]: Free inodes in % |<p>MIB: UCD-SNMP-MIB</p><p>If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.</p> |SNMP |vfs.fs.inode.pfree[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- JAVASCRIPT: `return (100-value);`</p> |
-|Storage |PFSense: [{#DEVNAME}]: Disk read rate |<p>MIB: UCD-DISKIO-MIB</p><p>The number of read accesses from this device since boot.</p> |SNMP |vfs.dev.read.rate[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
-|Storage |PFSense: [{#DEVNAME}]: Disk write rate |<p>MIB: UCD-DISKIO-MIB</p><p>The number of write accesses from this device since boot.</p> |SNMP |vfs.dev.write.rate[{#SNMPINDEX}]<p>**Preprocessing**:</p><p>- CHANGE_PER_SECOND</p> |
-|Storage |PFSense: [{#DEVNAME}]: Disk utilization |<p>MIB: UCD-DISKIO-MIB</p><p>The 1 minute average load of disk (%).</p> |SNMP |vfs.dev.util[{#SNMPINDEX}] |
## Triggers
|Name|Description|Expression|Severity|Dependencies and additional info|
|----|-----------|----|----|----|
-|PFSense: Load average is too high |<p>Per CPU load average is too high. Your system may be slow to respond.</p> |`min(/PFSense SNMP/system.cpu.load.avg1,5m)/last(/PFSense SNMP/system.cpu.num)>{$LOAD_AVG_PER_CPU.MAX.WARN} and last(/PFSense SNMP/system.cpu.load.avg5)>0 and last(/PFSense SNMP/system.cpu.load.avg15)>0 ` |AVERAGE | |
-|PFSense: High CPU utilization |<p>CPU utilization is too high. The system might be slow to respond.</p> |`min(/PFSense SNMP/system.cpu.util[{#SNMPINDEX}],5m)>{$CPU.UTIL.CRIT}` |WARNING |<p>**Depends on**:</p><p>- PFSense: Load average is too high</p> |
-|PFSense: System name has changed |<p>System name has changed. Ack to close.</p> |`last(/PFSense SNMP/system.name,#1)<>last(/PFSense SNMP/system.name,#2) and length(last(/PFSense SNMP/system.name))>0` |INFO |<p>Manual close: YES</p> |
-|PFSense: Lack of available memory |<p>The system is running out of memory.</p> |`min(/PFSense SNMP/vm.memory.available,5m)<{$MEMORY.AVAILABLE.MIN} and last(/PFSense SNMP/vm.memory.total)>0` |AVERAGE | |
-|PFSense: High memory utilization |<p>The system is running out of free memory.</p> |`min(/PFSense SNMP/vm.memory.util,5m)>{$MEMORY.UTIL.MAX}` |AVERAGE |<p>**Depends on**:</p><p>- PFSense: Lack of available memory</p> |
-|PFSense: High swap space usage |<p>This trigger is ignored, if there is no swap configured.</p> |`min(/PFSense SNMP/system.swap.pfree,5m)<{$SWAP.PFREE.MIN.WARN} and last(/PFSense SNMP/system.swap.total)>0` |WARNING |<p>**Depends on**:</p><p>- PFSense: High memory utilization</p><p>- PFSense: Lack of available memory</p> |
|PFSense: Interface [{#IFNAME}({#IFALIAS})]: High input error rate |<p>Recovers when below 80% of {$IF.ERRORS.WARN:"{#IFNAME}"} threshold.</p> |`min(/PFSense SNMP/net.if.in.errors[{#SNMPINDEX}],5m)>{$IF.ERRORS.WARN:"{#IFNAME}"}`<p>Recovery expression:</p>`max(/PFSense SNMP/net.if.in.errors[{#SNMPINDEX}],5m)<{$IF.ERRORS.WARN:"{#IFNAME}"}*0.8` |WARNING |<p>**Depends on**:</p><p>- PFSense: Interface [{#IFNAME}({#IFALIAS})]: Link down</p> |
|PFSense: Interface [{#IFNAME}({#IFALIAS})]: High inbound bandwidth usage |<p>The network interface utilization is close to its estimated maximum bandwidth. The threshold arithmetic is sketched below this table.</p> |`(avg(/PFSense SNMP/net.if.in[{#SNMPINDEX}],15m)>({$IF.UTIL.MAX:"{#IFNAME}"}/100)*last(/PFSense SNMP/net.if.speed[{#SNMPINDEX}])) and last(/PFSense SNMP/net.if.speed[{#SNMPINDEX}])>0 `<p>Recovery expression:</p>`avg(/PFSense SNMP/net.if.in[{#SNMPINDEX}],15m)<(({$IF.UTIL.MAX:"{#IFNAME}"}-3)/100)*last(/PFSense SNMP/net.if.speed[{#SNMPINDEX}])` |WARNING |<p>**Depends on**:</p><p>- PFSense: Interface [{#IFNAME}({#IFALIAS})]: Link down</p> |
|PFSense: Interface [{#IFNAME}({#IFALIAS})]: High output error rate |<p>Recovers when below 80% of {$IF.ERRORS.WARN:"{#IFNAME}"} threshold.</p> |`min(/PFSense SNMP/net.if.out.errors[{#SNMPINDEX}],5m)>{$IF.ERRORS.WARN:"{#IFNAME}"}`<p>Recovery expression:</p>`max(/PFSense SNMP/net.if.out.errors[{#SNMPINDEX}],5m)<{$IF.ERRORS.WARN:"{#IFNAME}"}*0.8` |WARNING |<p>**Depends on**:</p><p>- PFSense: Interface [{#IFNAME}({#IFALIAS})]: Link down</p> |
@@ -188,15 +122,7 @@ There are no template links in this template.
|PFSense: DHCP server is not running |<p>Please check DHCP server settings https://docs.netgate.com/pfsense/en/latest/services/dhcp/index.html</p> |`last(/PFSense SNMP/pfsense.dhcpd.status)=0` |AVERAGE | |
|PFSense: DNS server is not running |<p>Please check DNS server settings https://docs.netgate.com/pfsense/en/latest/services/dns/index.html</p> |`last(/PFSense SNMP/pfsense.dns.status)=0` |AVERAGE | |
|PFSense: Web server is not running |<p>Please check nginx service status.</p> |`last(/PFSense SNMP/pfsense.nginx.status)=0` |AVERAGE | |
-|PFSense: Unavailable by ICMP ping |<p>Last three attempts returned timeout. Please check device connectivity.</p> |`max(/PFSense SNMP/icmpping,#3)=0` |HIGH | |
-|PFSense: High ICMP ping loss |<p>ICMP packets loss detected.</p> |`min(/PFSense SNMP/icmppingloss,5m)>{$ICMP_LOSS_WARN} and min(/PFSense SNMP/icmppingloss,5m)<100` |WARNING |<p>**Depends on**:</p><p>- PFSense: Unavailable by ICMP ping</p> |
-|PFSense: High ICMP ping response time |<p>Average ICMP response time is too big.</p> |`avg(/PFSense SNMP/icmppingsec,5m)>{$ICMP_RESPONSE_TIME_WARN}` |WARNING |<p>**Depends on**:</p><p>- PFSense: Unavailable by ICMP ping</p> |
-|PFSense: has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/PFSense SNMP/system.uptime)<10m` |INFO |<p>Manual close: YES</p> |
-|PFSense: No SNMP data collection |<p>SNMP is not available for polling. Please check device connectivity and SNMP settings.</p> |`max(/PFSense SNMP/zabbix[host,snmp,available],{$SNMP.TIMEOUT})=0` |WARNING |<p>**Depends on**:</p><p>- PFSense: Unavailable by ICMP ping</p> |
-|PFSense: [{#FSNAME}]: Disk space is critically low |<p>Two conditions should match: First, space utilization should be above {$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"}.</p><p>Second condition should be one of the following:</p><p> - The disk free space is less than {$VFS.FS.FREE.MIN.CRIT:"{#FSNAME}"}.</p><p> - The disk will be full in less than 24 hours.</p> |`last(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}])>{$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"} and ((last(/PFSense SNMP/vfs.fs.total[{#SNMPINDEX}])-last(/PFSense SNMP/vfs.fs.used[{#SNMPINDEX}]))<{$VFS.FS.FREE.MIN.CRIT:"{#FSNAME}"} or timeleft(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}],1h,100)<1d) ` |AVERAGE |<p>Manual close: YES</p> |
-|PFSense: [{#FSNAME}]: Disk space is low |<p>Two conditions should match: First, space utilization should be above {$VFS.FS.PUSED.MAX.WARN:"{#FSNAME}"}.</p><p>Second condition should be one of the following:</p><p> - The disk free space is less than {$VFS.FS.FREE.MIN.WARN:"{#FSNAME}"}.</p><p> - The disk will be full in less than 24 hours.</p> |`last(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}])>{$VFS.FS.PUSED.MAX.WARN:"{#FSNAME}"} and ((last(/PFSense SNMP/vfs.fs.total[{#SNMPINDEX}])-last(/PFSense SNMP/vfs.fs.used[{#SNMPINDEX}]))<{$VFS.FS.FREE.MIN.WARN:"{#FSNAME}"} or timeleft(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}],1h,100)<1d) ` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- PFSense: [{#FSNAME}]: Disk space is critically low</p> |
-|PFSense: [{#FSNAME}]: Running out of free inodes |<p>It may become impossible to write to disk if there are no index nodes left.</p><p>As symptoms, 'No space left on device' or 'Disk is full' errors may be seen even though free space is available.</p> |`min(/PFSense SNMP/vfs.fs.inode.pfree[{#SNMPINDEX}],5m)<{$VFS.FS.INODE.PFREE.MIN.CRIT:"{#FSNAME}"}` |AVERAGE | |
-|PFSense: [{#FSNAME}]: Running out of free inodes |<p>It may become impossible to write to disk if there are no index nodes left.</p><p>As symptoms, 'No space left on device' or 'Disk is full' errors may be seen even though free space is available.</p> |`min(/PFSense SNMP/vfs.fs.inode.pfree[{#SNMPINDEX}],5m)<{$VFS.FS.INODE.PFREE.MIN.WARN:"{#FSNAME}"}` |WARNING |<p>**Depends on**:</p><p>- PFSense: [{#FSNAME}]: Running out of free inodes</p> |
+|PFSense: No SNMP data collection |<p>SNMP is not available for polling. Please check device connectivity and SNMP settings.</p> |`max(/PFSense SNMP/zabbix[host,snmp,available],{$SNMP.TIMEOUT})=0` |WARNING | |
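The bandwidth-usage triggers above compare a 15-minute average rate against a percentage of the learned interface speed. A minimal sketch of that arithmetic follows; the 1 Gbps speed and the value of 90 for {$IF.UTIL.MAX} are assumptions for illustration, not values taken from this template.

```javascript
// Illustrative sketch of the "High inbound bandwidth usage" thresholds.
// speedBps and utilMaxPct are assumed example values; the template reads them
// from net.if.speed[{#SNMPINDEX}] and {$IF.UTIL.MAX:"{#IFNAME}"}.
var speedBps = 1e9;      // assumed 1 Gbps interface speed, in bits per second
var utilMaxPct = 90;     // assumed {$IF.UTIL.MAX:"{#IFNAME}"} value
var avgInBps = 950e6;    // stands in for avg(net.if.in[{#SNMPINDEX}],15m)

var problem = speedBps > 0 && avgInBps > (utilMaxPct / 100) * speedBps;  // fires above 900 Mbps
var recovery = avgInBps < ((utilMaxPct - 3) / 100) * speedBps;           // recovers below 870 Mbps
```

The 3-percentage-point gap between the problem and recovery thresholds gives the trigger hysteresis, so it does not flap when traffic hovers around the limit.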
## Feedback
diff --git a/templates/app/pfsense_snmp/template_app_pfsense_snmp.yaml b/templates/app/pfsense_snmp/template_app_pfsense_snmp.yaml
index 0ff1e5cd211..aef5f602451 100644
--- a/templates/app/pfsense_snmp/template_app_pfsense_snmp.yaml
+++ b/templates/app/pfsense_snmp/template_app_pfsense_snmp.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-13T22:23:06Z'
+ date: '2022-06-06T12:13:26Z'
groups:
-
uuid: a571c0d144b14fd4a87a9d9b2aa9fcd6
@@ -21,9 +21,6 @@ zabbix_export:
MIBs used:
BEGEMOT-PF-MIB
HOST-RESOURCES-MIB
- SNMPv2-MIB
- UCD-DISKIO-MIB
- UCD-SNMP-MIB
Template tooling version used: 0.41
groups:
@@ -31,106 +28,6 @@ zabbix_export:
name: Templates/Applications
items:
-
- uuid: 2257828f22fd4b5ea7ffffc1c4296674
- name: 'PFSense: ICMP ping'
- type: SIMPLE
- key: icmpping
- history: 7d
- description: |
- Host accessibility by ICMP.
- 0 - ICMP ping fails.
- 1 - ICMP ping successful.
- valuemap:
- name: 'Service state'
- tags:
- -
- tag: component
- value: health
- -
- tag: component
- value: network
- triggers:
- -
- uuid: 8bf17f0b4f0642d48d76ea3add39ae23
- expression: 'max(/PFSense SNMP/icmpping,#3)=0'
- name: 'PFSense: Unavailable by ICMP ping'
- priority: HIGH
- description: 'Last three attempts returned timeout. Please check device connectivity.'
- tags:
- -
- tag: scope
- value: availability
- -
- uuid: b70284fd74b246da83e262ed66449612
- name: 'PFSense: ICMP loss'
- type: SIMPLE
- key: icmppingloss
- history: 7d
- value_type: FLOAT
- units: '%'
- description: 'Percentage of lost packets.'
- tags:
- -
- tag: component
- value: health
- -
- tag: component
- value: network
- triggers:
- -
- uuid: e02cf70bf2744d61abf8d770cc568867
- expression: 'min(/PFSense SNMP/icmppingloss,5m)>{$ICMP_LOSS_WARN} and min(/PFSense SNMP/icmppingloss,5m)<100'
- name: 'PFSense: High ICMP ping loss'
- opdata: 'Loss: {ITEM.LASTVALUE1}'
- priority: WARNING
- description: 'ICMP packets loss detected.'
- dependencies:
- -
- name: 'PFSense: Unavailable by ICMP ping'
- expression: 'max(/PFSense SNMP/icmpping,#3)=0'
- tags:
- -
- tag: scope
- value: availability
- -
- tag: scope
- value: performance
- -
- uuid: d1493a3c5a634ffb99aa031d134d07ad
- name: 'PFSense: ICMP response time'
- type: SIMPLE
- key: icmppingsec
- history: 7d
- value_type: FLOAT
- units: s
- description: 'ICMP ping response time (in seconds).'
- tags:
- -
- tag: component
- value: health
- -
- tag: component
- value: network
- triggers:
- -
- uuid: 364ec24b0c88416fb2575a6b06340ec7
- expression: 'avg(/PFSense SNMP/icmppingsec,5m)>{$ICMP_RESPONSE_TIME_WARN}'
- name: 'PFSense: High ICMP ping response time'
- opdata: 'Value: {ITEM.LASTVALUE1}'
- priority: WARNING
- description: 'Average ICMP response time is too big.'
- dependencies:
- -
- name: 'PFSense: Unavailable by ICMP ping'
- expression: 'max(/PFSense SNMP/icmpping,#3)=0'
- tags:
- -
- tag: scope
- value: availability
- -
- tag: scope
- value: performance
- -
uuid: a2a331bc385344a1877a45f3887b7d91
name: 'PFSense: DHCP server status'
type: SNMP_AGENT
@@ -517,465 +414,6 @@ zabbix_export:
tag: scope
value: capacity
-
- uuid: ef15deb6c36945b281a152e2af39dd1d
- name: 'PFSense: System contact details'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.1.4.0
- key: system.contact
- delay: 15m
- history: 7d
- trends: '0'
- value_type: CHAR
- description: |
- MIB: SNMPv2-MIB
- The textual identification of the contact person for this managed node, together with information on how to contact this person. If no contact information is known, the value is the zero-length string.
- inventory_link: CONTACT
- preprocessing:
- -
- type: DISCARD_UNCHANGED_HEARTBEAT
- parameters:
- - 6h
- tags:
- -
- tag: component
- value: system
- -
- uuid: 44da130c834d476b884a5cfec2ef53cd
- name: 'PFSense: Interrupts per second'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.59.0
- key: system.cpu.intr
- history: 7d
- value_type: FLOAT
- description: |
- MIB: UCD-SNMP-MIB
- Number of interrupts processed.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 2d426f9553de4c01bf3e92b72a0ef8d5
- name: 'PFSense: Load average (1m avg)'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.10.1.3["index","1.3.6.1.4.1.2021.10.1.2", "Load-1"]'
- key: system.cpu.load.avg1
- history: 7d
- value_type: FLOAT
- description: |
- MIB: UCD-SNMP-MIB
- The 1 minute load averages.
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 371cfe5f4cc64582a7d9dacba183c0ea
- name: 'PFSense: Load average (5m avg)'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.10.1.3["index","1.3.6.1.4.1.2021.10.1.2", "Load-5"]'
- key: system.cpu.load.avg5
- history: 7d
- value_type: FLOAT
- description: |
- MIB: UCD-SNMP-MIB
- The 5 minutes load averages.
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: e160d773f3814cba89c8729f2b0354f8
- name: 'PFSense: Load average (15m avg)'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.10.1.3["index","1.3.6.1.4.1.2021.10.1.2", "Load-15"]'
- key: system.cpu.load.avg15
- history: 7d
- value_type: FLOAT
- description: |
- MIB: UCD-SNMP-MIB
- The 15 minutes load averages.
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 8b9cc8b5ab164399a56169ed0603a804
- name: 'PFSense: Number of CPUs'
- type: SNMP_AGENT
- snmp_oid: 'discovery[{#SNMPVALUE},1.3.6.1.2.1.25.3.3.1.1]'
- key: system.cpu.num
- history: 7d
- description: |
- MIB: HOST-RESOURCES-MIB
- Count the number of CPU cores by counting number of cores discovered in hrProcessorTable using LLD.
- preprocessing:
- -
- type: JAVASCRIPT
- parameters:
- - |
- //count the number of cores
- return JSON.parse(value).length;
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: c476e9b090284cb6aaee6cd3fdcd1b88
- name: 'PFSense: Context switches per second'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.60.0
- key: system.cpu.switches
- history: 7d
- value_type: FLOAT
- description: |
- MIB: UCD-SNMP-MIB
- Number of context switches.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: e7038a49caec4d5293ceb2a18c1d839e
- name: 'PFSense: System description'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.1.1.0
- key: system.descr
- delay: 15m
- history: 7d
- trends: '0'
- value_type: CHAR
- description: |
- MIB: SNMPv2-MIB
- System description of the host.
- preprocessing:
- -
- type: DISCARD_UNCHANGED_HEARTBEAT
- parameters:
- - 6h
- tags:
- -
- tag: component
- value: system
- -
- uuid: cec39a0105434fbeb30529763a7298bb
- name: 'PFSense: System location'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.1.6.0
- key: system.location
- delay: 15m
- history: 7d
- trends: '0'
- value_type: CHAR
- description: |
- MIB: SNMPv2-MIB
- The physical location of this node. If the location is unknown, the value is the zero-length string.
- inventory_link: LOCATION
- preprocessing:
- -
- type: DISCARD_UNCHANGED_HEARTBEAT
- parameters:
- - 6h
- tags:
- -
- tag: component
- value: system
- -
- uuid: 8c3afc62530e457da1e2d057c96fc442
- name: 'PFSense: System name'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.1.5.0
- key: system.name
- delay: 15m
- history: 7d
- trends: '0'
- value_type: CHAR
- description: |
- MIB: SNMPv2-MIB
- System host name.
- inventory_link: NAME
- preprocessing:
- -
- type: DISCARD_UNCHANGED_HEARTBEAT
- parameters:
- - 6h
- tags:
- -
- tag: component
- value: system
- triggers:
- -
- uuid: d00224077a90496099c2622e3c662624
- expression: 'last(/PFSense SNMP/system.name,#1)<>last(/PFSense SNMP/system.name,#2) and length(last(/PFSense SNMP/system.name))>0'
- name: 'PFSense: System name has changed'
- event_name: 'PFSense: System name has changed (new name: {ITEM.VALUE})'
- priority: INFO
- description: 'System name has changed. Ack to close.'
- manual_close: 'YES'
- tags:
- -
- tag: scope
- value: notice
- -
- tag: scope
- value: security
- -
- uuid: 6f1d5a86066d4f669a8c11f77fbc078d
- name: 'PFSense: System object ID'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.1.2.0
- key: system.objectid
- delay: 15m
- history: 7d
- trends: '0'
- value_type: CHAR
- description: |
- MIB: SNMPv2-MIB
- The vendor authoritative identification of the network management subsystem contained in the entity. This value is allocated within the SMI enterprises subtree (1.3.6.1.4.1) and provides an easy and unambiguous means for determining what kind of box is being managed.
- preprocessing:
- -
- type: DISCARD_UNCHANGED_HEARTBEAT
- parameters:
- - 6h
- tags:
- -
- tag: component
- value: system
- -
- uuid: 163e057ff21a40cba86fe7ac023d83b1
- name: 'PFSense: Free swap space'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.4.0
- key: system.swap.free
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- The amount of swap space currently unused or available.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- tag: component
- value: storage
- -
- uuid: db5dabf90df84ea19e1e164204eeefdc
- name: 'PFSense: Free swap space in %'
- type: CALCULATED
- key: system.swap.pfree
- history: 7d
- value_type: FLOAT
- units: '%'
- params: 'last(//system.swap.free)/last(//system.swap.total)*100'
- description: 'The free space of swap volume/file in percent.'
- tags:
- -
- tag: component
- value: memory
- -
- tag: component
- value: storage
- -
- uuid: 3a7282d75aea46a38def8dbb5e5018eb
- name: 'PFSense: Total swap space'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.3.0
- key: system.swap.total
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- The total amount of swap space configured for this host.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- tag: component
- value: storage
- -
- uuid: 0979a3c35bf347a38b2c5a9dbc423325
- name: 'PFSense: Uptime'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.2.1.25.1.1.0
- key: system.uptime
- delay: 30s
- history: 7d
- trends: 0d
- units: uptime
- description: |
- MIB: SNMPv2-MIB
- System uptime in 'N days, hh:mm:ss' format.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '0.01'
- tags:
- -
- tag: component
- value: system
- triggers:
- -
- uuid: 9da88806fff147f8aa7a79e526f0f7d1
- expression: 'last(/PFSense SNMP/system.uptime)<10m'
- name: 'PFSense: has been restarted'
- event_name: 'PFSense: has been restarted (uptime < 10m)'
- priority: INFO
- description: 'Uptime is less than 10 minutes.'
- manual_close: 'YES'
- tags:
- -
- tag: scope
- value: notice
- -
- uuid: 1cc1e018e6364da1bc7264501e668ce8
- name: 'PFSense: Available memory'
- type: CALCULATED
- key: vm.memory.available
- history: 7d
- units: B
- params: last(//vm.memory.free)+last(//vm.memory.buffers)+last(//vm.memory.cached)
- description: 'Please note that memory utilization is a rough estimate, since memory available is calculated as free+buffers+cached, which is not 100% accurate, but the best we can get using SNMP.'
- tags:
- -
- tag: component
- value: memory
- -
- uuid: 8d421b71ea5f453d9079596617b64d1f
- name: 'PFSense: Memory (buffers)'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.14.0
- key: vm.memory.buffers
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- The total amount of real or virtual memory currently allocated for use as memory buffers.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- uuid: 83ae273de74b4779a66553e976d5fa6e
- name: 'PFSense: Memory (cached)'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.15.0
- key: vm.memory.cached
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- The total amount of real or virtual memory currently allocated for use as cached memory.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- uuid: 4efc067cae354030a8cbd1054671bdf7
- name: 'PFSense: Free memory'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.6.0
- key: vm.memory.free
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- The amount of real/physical memory currently unused or available.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- uuid: 84705a5340d149cf8a0792a1eb997c8e
- name: 'PFSense: Total memory'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.4.5.0
- key: vm.memory.total
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- Total memory in Bytes.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: memory
- -
- uuid: d0da8070297d49c3a44aeec984abd085
- name: 'PFSense: Memory utilization'
- type: CALCULATED
- key: vm.memory.util
- history: 7d
- value_type: FLOAT
- units: '%'
- params: '(last(//vm.memory.total)-(last(//vm.memory.free)+last(//vm.memory.buffers)+last(//vm.memory.cached)))/last(//vm.memory.total)*100'
- description: 'Please note that memory utilization is a rough estimate, since memory available is calculated as free+buffers+cached, which is not 100% accurate, but the best we can get using SNMP.'
- tags:
- -
- tag: component
- value: memory
- triggers:
- -
- uuid: 786ef350e20f4dfc9371a54e4ffeda63
- expression: 'min(/PFSense SNMP/vm.memory.util,5m)>{$MEMORY.UTIL.MAX}'
- name: 'PFSense: High memory utilization'
- event_name: 'PFSense: High memory utilization (>{$MEMORY.UTIL.MAX}% for 5m)'
- priority: AVERAGE
- description: 'The system is running out of free memory.'
- dependencies:
- -
- name: 'PFSense: Lack of available memory'
- expression: 'min(/PFSense SNMP/vm.memory.available,5m)<{$MEMORY.AVAILABLE.MIN} and last(/PFSense SNMP/vm.memory.total)>0'
- tags:
- -
- tag: scope
- value: capacity
- -
- tag: scope
- value: performance
- -
uuid: 585c88310a704e119aa5fc7f7f9c3804
name: 'PFSense: SNMP agent availability'
type: INTERNAL
@@ -1004,281 +442,17 @@ zabbix_export:
opdata: 'Current state: {ITEM.LASTVALUE1}'
priority: WARNING
description: 'SNMP is not available for polling. Please check device connectivity and SNMP settings.'
- dependencies:
- -
- name: 'PFSense: Unavailable by ICMP ping'
- expression: 'max(/PFSense SNMP/icmpping,#3)=0'
tags:
-
tag: scope
value: availability
discovery_rules:
-
- uuid: 47e148e6760145cc9288fa6b4e8d0013
- name: 'CPU discovery'
- type: DEPENDENT
- key: cpu.discovery
- delay: '0'
- description: 'This discovery will create set of per core CPU metrics from UCD-SNMP-MIB, using {#CPU.COUNT} in preprocessing. That''s the only reason why LLD is used.'
- item_prototypes:
- -
- uuid: 404e2eab283f4c79a6c2c52684d363c8
- name: 'PFSense: CPU idle time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.11.0
- key: 'system.cpu.idle[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- The time the CPU has spent doing nothing.
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 57a73880d8ff4779a2bbe9d774ad3fc8
- name: 'PFSense: CPU interrupt time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.56.0
- key: 'system.cpu.interrupt[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- The amount of time the CPU has been servicing hardware interrupts.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- -
- type: JAVASCRIPT
- parameters:
- - |
- //to get utilization in %, divide by N, where N is number of cores.
- return value/{#CPU.COUNT}
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 01e84522bcfd4cc2834c6c196e887563
- name: 'PFSense: CPU iowait time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.54.0
- key: 'system.cpu.iowait[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- Amount of time the CPU has been waiting for I/O to complete.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- -
- type: JAVASCRIPT
- parameters:
- - |
- //to get utilization in %, divide by N, where N is number of cores.
- return value/{#CPU.COUNT}
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 4eb44b0d8aa2440480c92b880196ba72
- name: 'PFSense: CPU nice time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.51.0
- key: 'system.cpu.nice[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- The time the CPU has spent running users' processes that have been niced.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- -
- type: JAVASCRIPT
- parameters:
- - |
- //to get utilization in %, divide by N, where N is number of cores.
- return value/{#CPU.COUNT}
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 4b412b3f128746c69364590965c02eea
- name: 'PFSense: CPU system time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.52.0
- key: 'system.cpu.system[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- The time the CPU has spent running the kernel and its processes.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- -
- type: JAVASCRIPT
- parameters:
- - |
- //to get utilization in %, divide by N, where N is number of cores.
- return value/{#CPU.COUNT}
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: 922ad22eed7f427e8560942941a427ba
- name: 'PFSense: CPU user time'
- type: SNMP_AGENT
- snmp_oid: 1.3.6.1.4.1.2021.11.50.0
- key: 'system.cpu.user[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- The time the CPU has spent running users' processes that are not niced.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- -
- type: JAVASCRIPT
- parameters:
- - |
- //to get utilization in %, divide by N, where N is number of cores.
- return value/{#CPU.COUNT}
- tags:
- -
- tag: component
- value: cpu
- -
- uuid: cae6db6f7aae4ecdaac3e451eee5eacb
- name: 'PFSense: CPU utilization'
- type: DEPENDENT
- key: 'system.cpu.util[{#SNMPINDEX}]'
- delay: '0'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: 'CPU utilization in %.'
- preprocessing:
- -
- type: JAVASCRIPT
- parameters:
- - |
- //Calculate utilization
- return (100 - value)
- master_item:
- key: 'system.cpu.idle[{#SNMPINDEX}]'
- tags:
- -
- tag: component
- value: cpu
- trigger_prototypes:
- -
- uuid: 76857a00f1fe4d639dbb5cfe775f36d6
- expression: 'min(/PFSense SNMP/system.cpu.util[{#SNMPINDEX}],5m)>{$CPU.UTIL.CRIT}'
- name: 'PFSense: High CPU utilization'
- event_name: 'PFSense: High CPU utilization (over {$CPU.UTIL.CRIT}% for 5m)'
- opdata: 'Current utilization: {ITEM.LASTVALUE1}'
- priority: WARNING
- description: 'CPU utilization is too high. The system might be slow to respond.'
- dependencies:
- -
- name: 'PFSense: Load average is too high'
- expression: |
- min(/PFSense SNMP/system.cpu.load.avg1,5m)/last(/PFSense SNMP/system.cpu.num)>{$LOAD_AVG_PER_CPU.MAX.WARN}
- and last(/PFSense SNMP/system.cpu.load.avg5)>0
- and last(/PFSense SNMP/system.cpu.load.avg15)>0
- tags:
- -
- tag: scope
- value: performance
- graph_prototypes:
- -
- uuid: 2b2dd0814a1e479fb9faa178b10ce83e
- name: 'PFSense: CPU usage{#SINGLETON}'
- type: STACKED
- ymin_type_1: FIXED
- ymax_type_1: FIXED
- graph_items:
- -
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.system[{#SNMPINDEX}]'
- -
- sortorder: '1'
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.user[{#SNMPINDEX}]'
- -
- sortorder: '2'
- color: F63100
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.nice[{#SNMPINDEX}]'
- -
- sortorder: '3'
- color: A54F10
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.iowait[{#SNMPINDEX}]'
- -
- sortorder: '4'
- color: FC6EA3
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.interrupt[{#SNMPINDEX}]'
- -
- uuid: 3131a60ebd384c79991ba2d6a3e3cf31
- name: 'PFSense: CPU utilization{#SINGLETON}'
- ymin_type_1: FIXED
- ymax_type_1: FIXED
- graph_items:
- -
- drawtype: GRADIENT_LINE
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: 'system.cpu.util[{#SNMPINDEX}]'
- master_item:
- key: system.cpu.num
- preprocessing:
- -
- type: JAVASCRIPT
- parameters:
- - |
- //count the number of CPU cores
- return JSON.stringify([{"{#CPU.COUNT}": value, "{#SNMPINDEX}": 0, "{#SINGLETON}":""}])
- -
uuid: 441ad463ea5544f89cb56b0b8f7763f9
name: 'Network interfaces discovery'
type: SNMP_AGENT
snmp_oid: 'discovery[{#IFOPERSTATUS},1.3.6.1.2.1.2.2.1.8,{#IFADMINSTATUS},1.3.6.1.2.1.2.2.1.7,{#IFALIAS},1.3.6.1.2.1.31.1.1.1.18,{#IFNAME},1.3.6.1.2.1.31.1.1.1.1,{#IFDESCR},1.3.6.1.2.1.2.2.1.2,{#IFTYPE},1.3.6.1.2.1.2.2.1.3]'
- key: net.if.discovery
+ key: pfsense.net.if.discovery
delay: 1h
filter:
evaltype: AND
@@ -2398,337 +1572,6 @@ zabbix_export:
item:
host: 'PFSense SNMP'
key: 'net.if.out.block.v6.pps[{#SNMPINDEX}]'
- -
- uuid: 2ee60ad904ba45348f5e42b02f27aa98
- name: 'Block devices discovery'
- type: SNMP_AGENT
- snmp_oid: 'discovery[{#DEVNAME},1.3.6.1.4.1.2021.13.15.1.1.2]'
- key: vfs.dev.discovery
- delay: 1h
- filter:
- evaltype: AND
- conditions:
- -
- macro: '{#DEVNAME}'
- value: '{$VFS.DEV.DEVNAME.MATCHES}'
- formulaid: A
- -
- macro: '{#DEVNAME}'
- value: '{$VFS.DEV.DEVNAME.NOT_MATCHES}'
- operator: NOT_MATCHES_REGEX
- formulaid: B
- description: 'Block devices are discovered from UCD-DISKIO-MIB::diskIOTable (http://net-snmp.sourceforge.net/docs/mibs/ucdDiskIOMIB.html#diskIOTable).'
- item_prototypes:
- -
- uuid: afda1ff507ea43f39dceaf76f3552690
- name: 'PFSense: [{#DEVNAME}]: Disk read rate'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.13.15.1.1.5.{#SNMPINDEX}'
- key: 'vfs.dev.read.rate[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '!r/s'
- description: |
- MIB: UCD-DISKIO-MIB
- The number of read accesses from this device since boot.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- tags:
- -
- tag: component
- value: storage
- -
- tag: disk
- value: '{#DEVNAME}'
- -
- uuid: 180555ff64304d15bd17b9d58cfc91ed
- name: 'PFSense: [{#DEVNAME}]: Disk utilization'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.13.15.1.1.9.{#SNMPINDEX}'
- key: 'vfs.dev.util[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-DISKIO-MIB
- The 1 minute average load of disk (%).
- tags:
- -
- tag: component
- value: storage
- -
- tag: disk
- value: '{#DEVNAME}'
- -
- uuid: 35777597864a4db0ad063c609c3dbee3
- name: 'PFSense: [{#DEVNAME}]: Disk write rate'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.13.15.1.1.6.{#SNMPINDEX}'
- key: 'vfs.dev.write.rate[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '!w/s'
- description: |
- MIB: UCD-DISKIO-MIB
- The number of write accesses from this device since boot.
- preprocessing:
- -
- type: CHANGE_PER_SECOND
- parameters:
- - ''
- tags:
- -
- tag: component
- value: storage
- -
- tag: disk
- value: '{#DEVNAME}'
- graph_prototypes:
- -
- uuid: 68683d3f206245639c89bda5da17e0d3
- name: 'PFSense: [{#DEVNAME}]: Disk read/write rates'
- graph_items:
- -
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: 'vfs.dev.read.rate[{#SNMPINDEX}]'
- -
- sortorder: '1'
- drawtype: GRADIENT_LINE
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: 'vfs.dev.write.rate[{#SNMPINDEX}]'
- -
- uuid: 8c6084603d134d1e96aeff663fc0045d
- name: 'Mounted filesystem discovery'
- type: SNMP_AGENT
- snmp_oid: 'discovery[{#FSNAME},.1.3.6.1.2.1.25.3.8.1.2,{#FSTYPE},.1.3.6.1.2.1.25.3.8.1.4]'
- key: vfs.fs.discovery
- delay: 1h
- filter:
- evaltype: AND
- conditions:
- -
- macro: '{#FSTYPE}'
- value: '{$VFS.FS.FSTYPE.MATCHES}'
- formulaid: C
- -
- macro: '{#FSTYPE}'
- value: '{$VFS.FS.FSTYPE.NOT_MATCHES}'
- operator: NOT_MATCHES_REGEX
- formulaid: D
- -
- macro: '{#FSNAME}'
- value: '{$VFS.FS.FSNAME.MATCHES}'
- formulaid: A
- -
- macro: '{#FSNAME}'
- value: '{$VFS.FS.FSNAME.NOT_MATCHES}'
- operator: NOT_MATCHES_REGEX
- formulaid: B
- description: 'HOST-RESOURCES-MIB::hrFS discovery with filter.'
- item_prototypes:
- -
- uuid: 73c51f990076413f959abe81cdcd5225
- name: 'PFSense: [{#FSNAME}]: Free inodes in %'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.9.1.10["index","1.3.6.1.4.1.2021.9.1.2","{#FSNAME}"]'
- key: 'vfs.fs.inode.pfree[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.
- preprocessing:
- -
- type: JAVASCRIPT
- parameters:
- - 'return (100-value);'
- tags:
- -
- tag: component
- value: storage
- -
- tag: filesystem
- value: '{#FSNAME}'
- trigger_prototypes:
- -
- uuid: 55324e1072f34c5fbc67a7f0566f64d6
- expression: 'min(/PFSense SNMP/vfs.fs.inode.pfree[{#SNMPINDEX}],5m)<{$VFS.FS.INODE.PFREE.MIN.CRIT:"{#FSNAME}"}'
- name: 'PFSense: [{#FSNAME}]: Running out of free inodes'
- event_name: 'PFSense: [{#FSNAME}]: Running out of free inodes (free < {$VFS.FS.INODE.PFREE.MIN.CRIT:"{#FSNAME}"}%)'
- opdata: 'Free inodes: {ITEM.LASTVALUE1}'
- priority: AVERAGE
- description: |
- It may become impossible to write to disk if there are no index nodes left.
- As symptoms, 'No space left on device' or 'Disk is full' errors may be seen even though free space is available.
- tags:
- -
- tag: scope
- value: capacity
- -
- uuid: 48917b7fd7cd4e16a7746ce9cf88a7b6
- expression: 'min(/PFSense SNMP/vfs.fs.inode.pfree[{#SNMPINDEX}],5m)<{$VFS.FS.INODE.PFREE.MIN.WARN:"{#FSNAME}"}'
- name: 'PFSense: [{#FSNAME}]: Running out of free inodes'
- event_name: 'PFSense: [{#FSNAME}]: Running out of free inodes (free < {$VFS.FS.INODE.PFREE.MIN.WARN:"{#FSNAME}"}%)'
- opdata: 'Free inodes: {ITEM.LASTVALUE1}'
- priority: WARNING
- description: |
- It may become impossible to write to disk if there are no index nodes left.
- As symptoms, 'No space left on device' or 'Disk is full' errors may be seen even though free space is available.
- dependencies:
- -
- name: 'PFSense: [{#FSNAME}]: Running out of free inodes'
- expression: 'min(/PFSense SNMP/vfs.fs.inode.pfree[{#SNMPINDEX}],5m)<{$VFS.FS.INODE.PFREE.MIN.CRIT:"{#FSNAME}"}'
- tags:
- -
- tag: scope
- value: capacity
- -
- uuid: 51fa616aa3ee4d3ca1371d0d455244eb
- name: 'PFSense: [{#FSNAME}]: Space utilization'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.9.1.9["index","1.3.6.1.4.1.2021.9.1.2","{#FSNAME}"]'
- key: 'vfs.fs.pused[{#SNMPINDEX}]'
- history: 7d
- value_type: FLOAT
- units: '%'
- description: |
- MIB: UCD-SNMP-MIB
- If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.
- tags:
- -
- tag: component
- value: storage
- -
- tag: filesystem
- value: '{#FSNAME}'
- -
- uuid: f5f253e700f742579a4f91d3061a816e
- name: 'PFSense: [{#FSNAME}]: Total space'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.9.1.6["index","1.3.6.1.4.1.2021.9.1.2","{#FSNAME}"]'
- key: 'vfs.fs.total[{#SNMPINDEX}]'
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: storage
- -
- tag: filesystem
- value: '{#FSNAME}'
- -
- uuid: eaf1ed2e94cc41f69fda97778f27628c
- name: 'PFSense: [{#FSNAME}]: Used space'
- type: SNMP_AGENT
- snmp_oid: '1.3.6.1.4.1.2021.9.1.8["index","1.3.6.1.4.1.2021.9.1.2","{#FSNAME}"]'
- key: 'vfs.fs.used[{#SNMPINDEX}]'
- history: 7d
- units: B
- description: |
- MIB: UCD-SNMP-MIB
- If having problems collecting this item make sure access to UCD-SNMP-MIB is allowed.
- preprocessing:
- -
- type: MULTIPLIER
- parameters:
- - '1024'
- tags:
- -
- tag: component
- value: storage
- -
- tag: filesystem
- value: '{#FSNAME}'
- trigger_prototypes:
- -
- uuid: 711e2c5b136e4a51a2fb745d847a9fc5
- expression: |
- last(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}])>{$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"} and
- ((last(/PFSense SNMP/vfs.fs.total[{#SNMPINDEX}])-last(/PFSense SNMP/vfs.fs.used[{#SNMPINDEX}]))<{$VFS.FS.FREE.MIN.CRIT:"{#FSNAME}"} or timeleft(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}],1h,100)<1d)
- name: 'PFSense: [{#FSNAME}]: Disk space is critically low'
- event_name: 'PFSense: [{#FSNAME}]: Disk space is critically low (used > {$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"}%)'
- opdata: 'Space used: {ITEM.LASTVALUE3} of {ITEM.LASTVALUE2} ({ITEM.LASTVALUE1})'
- priority: AVERAGE
- description: |
- Two conditions should match: First, space utilization should be above {$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"}.
- Second condition should be one of the following:
- - The disk free space is less than {$VFS.FS.FREE.MIN.CRIT:"{#FSNAME}"}.
- - The disk will be full in less than 24 hours.
- manual_close: 'YES'
- tags:
- -
- tag: scope
- value: availability
- -
- tag: scope
- value: capacity
- -
- uuid: ef0273e1092a40ff888e0ea87c859abb
- expression: |
- last(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}])>{$VFS.FS.PUSED.MAX.WARN:"{#FSNAME}"} and
- ((last(/PFSense SNMP/vfs.fs.total[{#SNMPINDEX}])-last(/PFSense SNMP/vfs.fs.used[{#SNMPINDEX}]))<{$VFS.FS.FREE.MIN.WARN:"{#FSNAME}"} or timeleft(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}],1h,100)<1d)
- name: 'PFSense: [{#FSNAME}]: Disk space is low'
- event_name: 'PFSense: [{#FSNAME}]: Disk space is low (used > {$VFS.FS.PUSED.MAX.WARN:"{#FSNAME}"}%)'
- opdata: 'Space used: {ITEM.LASTVALUE3} of {ITEM.LASTVALUE2} ({ITEM.LASTVALUE1})'
- priority: WARNING
- description: |
- Two conditions should match: First, space utilization should be above {$VFS.FS.PUSED.MAX.WARN:"{#FSNAME}"}.
- Second condition should be one of the following:
- - The disk free space is less than {$VFS.FS.FREE.MIN.WARN:"{#FSNAME}"}.
- - The disk will be full in less than 24 hours.
- manual_close: 'YES'
- dependencies:
- -
- name: 'PFSense: [{#FSNAME}]: Disk space is critically low'
- expression: |
- last(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}])>{$VFS.FS.PUSED.MAX.CRIT:"{#FSNAME}"} and
- ((last(/PFSense SNMP/vfs.fs.total[{#SNMPINDEX}])-last(/PFSense SNMP/vfs.fs.used[{#SNMPINDEX}]))<{$VFS.FS.FREE.MIN.CRIT:"{#FSNAME}"} or timeleft(/PFSense SNMP/vfs.fs.pused[{#SNMPINDEX}],1h,100)<1d)
- tags:
- -
- tag: scope
- value: availability
- -
- tag: scope
- value: capacity
- graph_prototypes:
- -
- uuid: af57ea943b0f45f6b03028ef48c07605
- name: 'PFSense: [{#FSNAME}]: Disk space usage'
- width: '600'
- height: '340'
- type: PIE
- show_3d: 'YES'
- graph_items:
- -
- color: '969696'
- calc_fnc: LAST
- type: GRAPH_SUM
- item:
- host: 'PFSense SNMP'
- key: 'vfs.fs.total[{#SNMPINDEX}]'
- -
- sortorder: '1'
- color: C80000
- calc_fnc: LAST
- item:
- host: 'PFSense SNMP'
- key: 'vfs.fs.used[{#SNMPINDEX}]'
tags:
-
tag: class
@@ -2738,18 +1581,6 @@ zabbix_export:
value: pfsense
macros:
-
- macro: '{$CPU.UTIL.CRIT}'
- value: '90'
- description: 'Threshold of CPU utilization for warning trigger in %.'
- -
- macro: '{$ICMP_LOSS_WARN}'
- value: '20'
- description: 'Threshold of ICMP packets loss for warning trigger in %.'
- -
- macro: '{$ICMP_RESPONSE_TIME_WARN}'
- value: '0.15'
- description: 'Threshold of average ICMP response time for warning trigger in seconds.'
- -
macro: '{$IF.ERRORS.WARN}'
value: '2'
description: 'Threshold of error packets rate for warning trigger. Can be used with interface name as context.'
@@ -2762,18 +1593,6 @@ zabbix_export:
value: '1'
description: 'Macro for operational state of the interface for link down trigger. Can be used with interface name as context.'
-
- macro: '{$LOAD_AVG_PER_CPU.MAX.WARN}'
- value: '1.5'
- description: 'Load per CPU considered sustainable. Tune if needed.'
- -
- macro: '{$MEMORY.AVAILABLE.MIN}'
- value: 20M
- description: 'Threshold of available memory for trigger in bytes.'
- -
- macro: '{$MEMORY.UTIL.MAX}'
- value: '90'
- description: 'Threshold of memory utilization for trigger in %'
- -
macro: '{$NET.IF.IFADMINSTATUS.MATCHES}'
value: '^.*'
description: 'This macro is used in filters of network interfaces discovery rule.'
@@ -2833,58 +1652,6 @@ zabbix_export:
macro: '{$STATE.TABLE.UTIL.MAX}'
value: '90'
description: 'Threshold of state table utilization trigger in %.'
- -
- macro: '{$SWAP.PFREE.MIN.WARN}'
- value: '50'
- description: 'Threshold of free swap space for warning trigger in %.'
- -
- macro: '{$VFS.DEV.DEVNAME.MATCHES}'
- value: .+
- description: 'This macro is used in block devices discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.DEV.DEVNAME.NOT_MATCHES}'
- value: '^(loop[0-9]*|sd[a-z][0-9]+|nbd[0-9]+|sr[0-9]+|fd[0-9]+|dm-[0-9]+|ram[0-9]+|ploop[a-z0-9]+|md[0-9]*|hcp[0-9]*|cd[0-9]*|pass[0-9]*|zram[0-9]*)'
- description: 'This macro is used in block devices discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.FS.FREE.MIN.CRIT}'
- value: 5G
- description: 'The critical threshold of the filesystem utilization.'
- -
- macro: '{$VFS.FS.FREE.MIN.WARN}'
- value: 10G
- description: 'The warning threshold of the filesystem utilization.'
- -
- macro: '{$VFS.FS.FSNAME.MATCHES}'
- value: .+
- description: 'This macro is used in filesystems discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.FS.FSNAME.NOT_MATCHES}'
- value: ^(/dev|/sys|/run|/var/run|/proc|.+/shm$)
- description: 'This macro is used in filesystems discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.FS.FSTYPE.MATCHES}'
- value: '.*(9.3|hrFSBerkeleyFFS)$'
- description: 'This macro is used in filesystems discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.FS.FSTYPE.NOT_MATCHES}'
- value: ^\s$
- description: 'This macro is used in filesystems discovery. Can be overridden on the host or linked template level'
- -
- macro: '{$VFS.FS.INODE.PFREE.MIN.CRIT}'
- value: '10'
- description: 'Threshold of inodes usage for average severity trigger in %. Can be used with filesystem name as context.'
- -
- macro: '{$VFS.FS.INODE.PFREE.MIN.WARN}'
- value: '20'
- description: 'Threshold of inodes usage for warning trigger in %. Can be used with filesystem name as context.'
- -
- macro: '{$VFS.FS.PUSED.MAX.CRIT}'
- value: '90'
- description: 'Threshold of filesystem used space for average severity trigger in %. Can be used with filesystem name as context.'
- -
- macro: '{$VFS.FS.PUSED.MAX.WARN}'
- value: '80'
- description: 'Threshold of used filesystem space for warning trigger in %. Can be used with filesystem name as context.'
valuemaps:
-
uuid: 2216d8afb349448394590770ff99e56a
@@ -3784,16 +2551,6 @@ zabbix_export:
value: '2'
newvalue: running
-
- uuid: c3c721156c624317af993e1a263c16bc
- name: 'Service state'
- mappings:
- -
- value: '0'
- newvalue: Down
- -
- value: '1'
- newvalue: Up
- -
uuid: f6bb441b7a65446c96c546bd3f3140ee
name: 'SNMPv2-TC::TruthValue'
mappings:
@@ -3816,106 +2573,8 @@ zabbix_export:
-
value: '2'
newvalue: unknown
- triggers:
- -
- uuid: fe568255690f4125847230be3ba88f54
- expression: 'min(/PFSense SNMP/system.swap.pfree,5m)<{$SWAP.PFREE.MIN.WARN} and last(/PFSense SNMP/system.swap.total)>0'
- name: 'PFSense: High swap space usage'
- event_name: 'PFSense: High swap space usage (less than {$SWAP.PFREE.MIN.WARN}% free)'
- opdata: 'Free: {ITEM.LASTVALUE1}, total: {ITEM.LASTVALUE2}'
- priority: WARNING
- description: 'This trigger is ignored, if there is no swap configured.'
- dependencies:
- -
- name: 'PFSense: High memory utilization'
- expression: 'min(/PFSense SNMP/vm.memory.util,5m)>{$MEMORY.UTIL.MAX}'
- -
- name: 'PFSense: Lack of available memory'
- expression: 'min(/PFSense SNMP/vm.memory.available,5m)<{$MEMORY.AVAILABLE.MIN} and last(/PFSense SNMP/vm.memory.total)>0'
- tags:
- -
- tag: scope
- value: capacity
- -
- tag: scope
- value: performance
- -
- uuid: d890efaff23f49e8ba05d22390f9e21b
- expression: 'min(/PFSense SNMP/vm.memory.available,5m)<{$MEMORY.AVAILABLE.MIN} and last(/PFSense SNMP/vm.memory.total)>0'
- name: 'PFSense: Lack of available memory'
- event_name: 'PFSense: Lack of available memory (<{$MEMORY.AVAILABLE.MIN} of {ITEM.VALUE2})'
- opdata: 'Available: {ITEM.LASTVALUE1}, total: {ITEM.LASTVALUE2}'
- priority: AVERAGE
- description: 'The system is running out of memory.'
- tags:
- -
- tag: scope
- value: capacity
- -
- tag: scope
- value: performance
- -
- uuid: ffbe648f887744de91a6e1206518fc74
- expression: |
- min(/PFSense SNMP/system.cpu.load.avg1,5m)/last(/PFSense SNMP/system.cpu.num)>{$LOAD_AVG_PER_CPU.MAX.WARN}
- and last(/PFSense SNMP/system.cpu.load.avg5)>0
- and last(/PFSense SNMP/system.cpu.load.avg15)>0
- name: 'PFSense: Load average is too high'
- event_name: 'PFSense: Load average is too high (per CPU load over {$LOAD_AVG_PER_CPU.MAX.WARN} for 5m)'
- opdata: 'Load averages(1m 5m 15m): ({ITEM.LASTVALUE1} {ITEM.LASTVALUE3} {ITEM.LASTVALUE4}), # of CPUs: {ITEM.LASTVALUE2}'
- priority: AVERAGE
- description: 'Per CPU load average is too high. Your system may be slow to respond.'
- tags:
- -
- tag: scope
- value: performance
graphs:
-
- uuid: 7fdf3de55e6b48abb9c6fbbf4a0f4ceb
- name: 'PFSense: CPU jumps'
- graph_items:
- -
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: system.cpu.switches
- -
- sortorder: '1'
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: system.cpu.intr
- -
- uuid: f282a7c00c5a451e93c1a62939999041
- name: 'PFSense: Memory usage'
- ymin_type_1: FIXED
- graph_items:
- -
- drawtype: BOLD_LINE
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: vm.memory.total
- -
- sortorder: '1'
- drawtype: GRADIENT_LINE
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: vm.memory.available
- -
- uuid: 38b05f5d70f84ac69220e93bc9636b76
- name: 'PFSense: Memory utilization'
- ymin_type_1: FIXED
- ymax_type_1: FIXED
- graph_items:
- -
- drawtype: GRADIENT_LINE
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: vm.memory.util
- -
uuid: cd45a63b52d7470db6b4574027433c8e
name: 'PFSense: Packet filter reason codes'
type: STACKED
@@ -3956,47 +2615,3 @@ zabbix_export:
item:
host: 'PFSense SNMP'
key: pfsense.packets.mem.drop
- -
- uuid: 33a80d114d82400ebe2b3583726a5526
- name: 'PFSense: Swap usage'
- graph_items:
- -
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: system.swap.free
- -
- sortorder: '1'
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: system.swap.total
- -
- uuid: d1e3ad2d98bd42a0bdd8435e94c15c3c
- name: 'PFSense: System load'
- ymin_type_1: FIXED
- graph_items:
- -
- color: 1A7C11
- item:
- host: 'PFSense SNMP'
- key: system.cpu.load.avg1
- -
- sortorder: '1'
- color: 2774A4
- item:
- host: 'PFSense SNMP'
- key: system.cpu.load.avg5
- -
- sortorder: '2'
- color: F63100
- item:
- host: 'PFSense SNMP'
- key: system.cpu.load.avg15
- -
- sortorder: '3'
- color: A54F10
- yaxisside: RIGHT
- item:
- host: 'PFSense SNMP'
- key: system.cpu.num
diff --git a/templates/db/mongodb/README.md b/templates/db/mongodb/README.md
index fd7902d3f39..f989d5e832d 100644
--- a/templates/db/mongodb/README.md
+++ b/templates/db/mongodb/README.md
@@ -191,7 +191,7 @@ There are no template links in this template.
|----|-----------|----|----|----|
|MongoDB: Connection to MongoDB is unavailable |<p>Connection to MongoDB instance is currently unavailable.</p> |`last(/MongoDB node by Zabbix agent 2/mongodb.ping["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"])=0` |HIGH | |
|MongoDB: Version has changed |<p>MongoDB version has changed. Ack to close.</p> |`last(/MongoDB node by Zabbix agent 2/mongodb.version,#1)<>last(/MongoDB node by Zabbix agent 2/mongodb.version,#2) and length(last(/MongoDB node by Zabbix agent 2/mongodb.version))>0` |INFO |<p>Manual close: YES</p> |
-|MongoDB: has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/MongoDB node by Zabbix agent 2/mongodb.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|MongoDB: has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/MongoDB node by Zabbix agent 2/mongodb.uptime)<10m` |INFO |<p>Manual close: YES</p> |
|MongoDB: Failed to fetch info data |<p>Zabbix has not received data for items for the last 10 minutes</p> |`nodata(/MongoDB node by Zabbix agent 2/mongodb.uptime,10m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- MongoDB: Connection to MongoDB is unavailable</p> |
|MongoDB: Total number of open connections is too high |<p>Too few available connections. If MongoDB runs low on connections, it may not be able to handle incoming requests in a timely manner. The percentage calculation is sketched below this table.</p> |`min(/MongoDB node by Zabbix agent 2/mongodb.connections.current,5m)/(last(/MongoDB node by Zabbix agent 2/mongodb.connections.available)+last(/MongoDB node by Zabbix agent 2/mongodb.connections.current))*100>{$MONGODB.CONNS.PCT.USED.MAX.WARN}` |WARNING | |
|MongoDB: Too many cursors opened by MongoDB for clients |<p>-</p> |`min(/MongoDB node by Zabbix agent 2/mongodb.cursor.open.total,5m)>{$MONGODB.CURSOR.OPEN.MAX.WARN}` |WARNING | |
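The connection trigger above is plain arithmetic over two connection counters. Below is a minimal sketch of the same calculation; the counter values and the threshold of 80 are illustrative assumptions, not defaults read from this template.

```javascript
// Illustrative sketch of the "Total number of open connections is too high" check.
// All numbers are assumed examples; the template reads them from the
// mongodb.connections.* items and {$MONGODB.CONNS.PCT.USED.MAX.WARN}.
var current = 4200;      // stands in for min(mongodb.connections.current,5m)
var available = 800;     // stands in for last(mongodb.connections.available)
var warnThreshold = 80;  // stands in for {$MONGODB.CONNS.PCT.USED.MAX.WARN}

// available + current approximates the configured connection limit,
// so the ratio tracks how close the instance is to exhausting it.
var pctUsed = current / (available + current) * 100;  // 84
var problem = pctUsed > warnThreshold;                 // true: the trigger would fire
```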
diff --git a/templates/db/mongodb/template_db_mongodb.yaml b/templates/db/mongodb/template_db_mongodb.yaml
index adedc807678..e6069a98abd 100644
--- a/templates/db/mongodb/template_db_mongodb.yaml
+++ b/templates/db/mongodb/template_db_mongodb.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:33:10Z'
+ date: '2022-05-06T14:09:10Z'
groups:
-
uuid: 748ad4d098d447d492bb935c907f652f
@@ -486,7 +486,7 @@ zabbix_export:
tags:
-
tag: component
- value: documents
+ value: cursors
-
uuid: f23bf716eced4ba5948b06c24c780904
name: 'MongoDB: Cursor: open total'
@@ -692,7 +692,7 @@ zabbix_export:
tags:
-
tag: component
- value: documents
+ value: cursors
-
uuid: ba0502f80a044670993bdb63587cd282
name: 'MongoDB: Bytes in, rate'
@@ -1031,7 +1031,7 @@ zabbix_export:
name: 'MongoDB: has been restarted'
event_name: 'MongoDB: has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
diff --git a/templates/db/mongodb_cluster/README.md b/templates/db/mongodb_cluster/README.md
index 10d608cdcf1..8a46ee9c45d 100644
--- a/templates/db/mongodb_cluster/README.md
+++ b/templates/db/mongodb_cluster/README.md
@@ -9,7 +9,6 @@ Most of the metrics are collected in one go, thanks to Zabbix bulk data collecti
`MongoDB cluster by Zabbix agent 2` — collects metrics from the mongos proxy (router) by polling zabbix-agent2.
-
This template was tested on:
- MongoDB, version 4.0.21, 4.4.3
@@ -18,7 +17,6 @@ This template was tested on:
> See [Zabbix template operation](https://www.zabbix.com/documentation/6.0/manual/config/templates_out_of_the_box/zabbix_agent2) for basic instructions.
-
1. Set up and configure zabbix-agent2 compiled with the MongoDB monitoring plugin.
2. Set {$MONGODB.CONNSTRING} to <protocol(host:port)> or to a named session of the mongos proxy (router).
3. Set the user name and password in host macros ({$MONGODB.USER}, {$MONGODB.PASSWORD}) if you want to override parameters from the Zabbix agent configuration file.
@@ -30,7 +28,6 @@ All sharded Mongodb nodes (mongod) will be discovered with attached template "Mo
Test availability: `zabbix_get -s mongos.node -k 'mongodb.ping["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"]'`
-
## Zabbix configuration
No specific Zabbix configuration is required.
@@ -77,7 +74,7 @@ There are no template links in this template.
|MongoDB sharded cluster |MongoDB cluster: Operations: query, rate |<p>The number of queries received by the mongos instance per second.</p> |DEPENDENT |mongodb.opcounters.query.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.opcounters.query`</p><p>- CHANGE_PER_SECOND</p> |
|MongoDB sharded cluster |MongoDB cluster: Operations: insert, rate |<p>The number of insert operations received by the mongos instance per second.</p> |DEPENDENT |mongodb.opcounters.insert.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.opcounters.insert`</p><p>- CHANGE_PER_SECOND</p> |
|MongoDB sharded cluster |MongoDB cluster: Operations: getmore, rate |<p>The number of getMore operations received by the mongos instance per second. This counter can be high even if the query count is low.</p><p>Secondary nodes send getMore operations as part of the replication process.</p> |DEPENDENT |mongodb.opcounters.getmore.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.opcounters.getmore`</p><p>- CHANGE_PER_SECOND</p> |
-|MongoDB sharded cluster |MongoDB cluster: Last seen configserver |<p>The latest optime of the CSRS primary that the mongos has seen.</p> |DEPENDENT |mongodb.last_seen_config_server<p>**Preprocessing**:</p><p>- JAVASCRIPT: `data = JSON.parse(value) return data.sharding.lastSeenConfigServerOpTime.ts/Math.pow(2,32) `</p> |
+|MongoDB sharded cluster |MongoDB cluster: Last seen configserver |<p>The latest optime of the CSRS primary that the mongos has seen.</p> |DEPENDENT |mongodb.last_seen_config_server<p>**Preprocessing**:</p><p>- JSONPATH: `$.sharding.lastSeenConfigServerOpTime.ts.T`</p> |
|MongoDB sharded cluster |MongoDB cluster: Configserver heartbeat |<p>Difference between the latest optime of the CSRS primary that the mongos has seen and cluster time.</p> |DEPENDENT |mongodb.config_server_heartbeat<p>**Preprocessing**:</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
|MongoDB sharded cluster |MongoDB cluster: Bytes in, rate |<p>The total number of bytes that the server has received over network connections initiated by clients or other mongod/mongos instances per second.</p> |DEPENDENT |mongodb.network.bytes_in.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.network.bytesIn`</p><p>- CHANGE_PER_SECOND</p> |
|MongoDB sharded cluster |MongoDB cluster: Bytes out, rate |<p>The total number of bytes that the server has sent over network connections initiated by clients or other mongod/mongos instances per second.</p> |DEPENDENT |mongodb.network.bytes_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.network.bytesOut`</p><p>- CHANGE_PER_SECOND</p> |
@@ -92,7 +89,7 @@ There are no template links in this template.
|MongoDB sharded cluster |MongoDB cluster: Connection pool: available |<p>The total number of available outgoing connections from the current mongos instance to other members of the sharded cluster.</p> |DEPENDENT |mongodb.connection_pool.available<p>**Preprocessing**:</p><p>- JSONPATH: `$.totalAvailable`</p> |
|MongoDB sharded cluster |MongoDB cluster: Connection pool: in use |<p>Reports the total number of outgoing connections from the current mongos instance to other members of the sharded cluster set that are currently in use.</p> |DEPENDENT |mongodb.connection_pool.in_use<p>**Preprocessing**:</p><p>- JSONPATH: `$.totalInUse`</p> |
|MongoDB sharded cluster |MongoDB cluster: Connection pool: refreshing |<p>Reports the total number of outgoing connections from the current mongos instance to other members of the sharded cluster that are currently being refreshed.</p> |DEPENDENT |mongodb.connection_pool.refreshing<p>**Preprocessing**:</p><p>- JSONPATH: `$.totalRefreshing`</p> |
-|MongoDB sharded cluster |MongoDB cluster: Cursor: open no timeout |<p>Number of open cursors with the option DBQuery.Option.noTimeout set to prevent timeout after a period of inactivity.</p> |DEPENDENT |mongodb.metrics.cursor.open.no_timeout<p>**Preprocessing**:</p><p>- JSONPATH: `$.metrics.cursor.open.noTimeout`</p> |
+|MongoDB sharded cluster |MongoDB cluster: Cursor: open no timeout |<p>Number of open cursors with the option DBQuery.Option.noTimeout set to prevent timeout after a period of inactivity.</p> |DEPENDENT |mongodb.metrics.cursor.open.no_timeout<p>**Preprocessing**:</p><p>- JSONPATH: `$.metrics.cursor.open.noTimeout`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p> |
|MongoDB sharded cluster |MongoDB cluster: Cursor: open pinned |<p>Number of pinned open cursors.</p> |DEPENDENT |mongodb.cursor.open.pinned<p>**Preprocessing**:</p><p>- JSONPATH: `$.metrics.cursor.open.pinned`</p> |
|MongoDB sharded cluster |MongoDB cluster: Cursor: open total |<p>Number of cursors that MongoDB is maintaining for clients.</p> |DEPENDENT |mongodb.cursor.open.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.metrics.cursor.open.total`</p> |
|MongoDB sharded cluster |MongoDB cluster: Cursor: timed out, rate |<p>Number of cursors that time out, per second.</p> |DEPENDENT |mongodb.cursor.timed_out.rate<p>**Preprocessing**:</p><p>- JSONPATH: `$.metrics.cursor.timedOut`</p><p>- CHANGE_PER_SECOND</p> |
@@ -125,7 +122,7 @@ There are no template links in this template.
|----|-----------|----|----|----|
|MongoDB cluster: Connection to mongos proxy is unavailable |<p>Connection to mongos proxy instance is currently unavailable.</p> |`last(/MongoDB cluster by Zabbix agent 2/mongodb.ping["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"])=0` |HIGH | |
|MongoDB cluster: Version has changed |<p>MongoDB cluster version has changed. Ack to close.</p> |`last(/MongoDB cluster by Zabbix agent 2/mongodb.version,#1)<>last(/MongoDB cluster by Zabbix agent 2/mongodb.version,#2) and length(last(/MongoDB cluster by Zabbix agent 2/mongodb.version))>0` |INFO |<p>Manual close: YES</p> |
-|MongoDB cluster: has been restarted |<p>Uptime is less than 10 minutes</p> |`last(/MongoDB cluster by Zabbix agent 2/mongodb.uptime)<10m` |INFO |<p>Manual close: YES</p> |
+|MongoDB cluster: has been restarted |<p>Uptime is less than 10 minutes.</p> |`last(/MongoDB cluster by Zabbix agent 2/mongodb.uptime)<10m` |INFO |<p>Manual close: YES</p> |
|MongoDB cluster: Failed to fetch info data |<p>Zabbix has not received data for items for the last 10 minutes</p> |`nodata(/MongoDB cluster by Zabbix agent 2/mongodb.uptime,10m)=1` |WARNING |<p>Manual close: YES</p><p>**Depends on**:</p><p>- MongoDB cluster: Connection to mongos proxy is unavailable</p> |
|MongoDB cluster: Available connections is low |<p>"Too few available connections.</p><p>Consider this value in combination with the value of connections current to understand the connection load on the database"</p> |`max(/MongoDB cluster by Zabbix agent 2/mongodb.connections.available,5m)<{$MONGODB.CONNS.AVAILABLE.MIN.WARN}` |WARNING | |
|MongoDB cluster: Too many cursors opened by MongoDB for clients |<p>-</p> |`min(/MongoDB cluster by Zabbix agent 2/mongodb.cursor.open.total,5m)>{$MONGODB.CURSOR.OPEN.MAX.WARN}` |WARNING | |
diff --git a/templates/db/mongodb_cluster/template_db_mongodb_cluster.yaml b/templates/db/mongodb_cluster/template_db_mongodb_cluster.yaml
index 672bb34fa2c..ffc65d8e0e5 100644
--- a/templates/db/mongodb_cluster/template_db_mongodb_cluster.yaml
+++ b/templates/db/mongodb_cluster/template_db_mongodb_cluster.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:33:11Z'
+ date: '2022-05-06T14:17:41Z'
groups:
-
uuid: 748ad4d098d447d492bb935c907f652f
@@ -39,8 +39,8 @@ zabbix_export:
type: JAVASCRIPT
parameters:
- |
- data = JSON.parse(value)
- return (data["$clusterTime"].clusterTime-data.sharding.lastSeenConfigServerOpTime.ts)/Math.pow(2,32);
+ data = JSON.parse(value);
+ return (data["$clusterTime"].clusterTime.T - data.sharding.lastSeenConfigServerOpTime.ts.T);
master_item:
key: 'mongodb.server.status["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"]'
tags:
@@ -388,11 +388,9 @@ zabbix_export:
description: 'The latest optime of the CSRS primary that the mongos has seen.'
preprocessing:
-
- type: JAVASCRIPT
+ type: JSONPATH
parameters:
- - |
- data = JSON.parse(value)
- return data.sharding.lastSeenConfigServerOpTime.ts/Math.pow(2,32)
+ - $.sharding.lastSeenConfigServerOpTime.ts.T
master_item:
key: 'mongodb.server.status["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"]'
tags:
@@ -484,6 +482,7 @@ zabbix_export:
type: JSONPATH
parameters:
- $.metrics.cursor.open.noTimeout
+ error_handler: DISCARD_VALUE
master_item:
key: 'mongodb.server.status["{$MONGODB.CONNSTRING}","{$MONGODB.USER}","{$MONGODB.PASSWORD}"]'
tags:
@@ -796,7 +795,7 @@ zabbix_export:
name: 'MongoDB cluster: has been restarted'
event_name: 'MongoDB cluster: has been restarted (uptime < 10m)'
priority: INFO
- description: 'Uptime is less than 10 minutes'
+ description: 'Uptime is less than 10 minutes.'
manual_close: 'YES'
tags:
-
diff --git a/templates/db/mssql_odbc/README.md b/templates/db/mssql_odbc/README.md
index 08f39bd1a5e..0e31239be2b 100644
--- a/templates/db/mssql_odbc/README.md
+++ b/templates/db/mssql_odbc/README.md
@@ -93,7 +93,7 @@ There are no template links in this template.
|----|-----------|----|----|
|Availability groups discovery |<p>Discovery of the existing availability groups.</p> |ODBC |db.odbc.discovery[availability_groups,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Database discovery |<p>Scanning databases in DBMS.</p> |ODBC |db.odbc.discovery[dbname,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>**Filter**:</p>AND <p>- {#DBNAME} MATCHES_REGEX `{$MSSQL.DBNAME.MATCHES}`</p><p>- {#DBNAME} NOT_MATCHES_REGEX `{$MSSQL.DBNAME.NOT_MATCHES}`</p> |
-|Job discovery |<p>Scanning jobs in DBMS.</p> |ODBC |db.odbc.discovery[jobname,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>**Filter**:</p>AND_OR <p>- {#JOBNAME} MATCHES_REGEX `{$MSSQL.JOB.MATCHES}`</p><p>- {#JOBNAME} NOT_MATCHES_REGEX `{$MSSQL.JOB.NOT_MATCHES}`</p> |
+|Job discovery |<p>Scanning jobs in DBMS.</p> |ODBC |db.odbc.discovery[jobname,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>**Filter**:</p>AND <p>- {#JOBNAME} MATCHES_REGEX `{$MSSQL.JOB.MATCHES}`</p><p>- {#JOBNAME} NOT_MATCHES_REGEX `{$MSSQL.JOB.NOT_MATCHES}`</p> |
|Local database discovery |<p>Discovery of the local availability databases.</p> |ODBC |db.odbc.discovery[local_db,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Mirroring discovery |<p>To see the row for a database other than master or tempdb, you must</p><p>either be the database owner or have at least ALTER ANY DATABASE or VIEW ANY</p><p>DATABASE server-level permission or CREATE DATABASE permission in the master</p><p>database. To see non-NULL values on a mirror database, you must be a member</p><p>of the sysadmin fixed server role.</p> |ODBC |db.odbc.discovery[mirrors,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
|Non-local database discovery |<p>Discovery of the non-local (not local to the SQL Server instance) availability databases.</p> |ODBC |db.odbc.discovery[non-local_db,"{$MSSQL.DSN}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
diff --git a/templates/db/mssql_odbc/template_db_mssql_odbc.yaml b/templates/db/mssql_odbc/template_db_mssql_odbc.yaml
index b2ef94a9bd8..2274c62e55f 100644
--- a/templates/db/mssql_odbc/template_db_mssql_odbc.yaml
+++ b/templates/db/mssql_odbc/template_db_mssql_odbc.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-13T22:55:23Z'
+ date: '2022-06-07T10:46:01Z'
groups:
-
uuid: 748ad4d098d447d492bb935c907f652f
@@ -3005,6 +3005,7 @@ zabbix_export:
username: '{$MSSQL.USER}'
password: '{$MSSQL.PASSWORD}'
filter:
+ evaltype: AND
conditions:
-
macro: '{#JOBNAME}'
diff --git a/templates/media/express.ms/media_express_ms.yaml b/templates/media/express.ms/media_express_ms.yaml
index 2c579c8cbee..2dca12a7a07 100644
--- a/templates/media/express.ms/media_express_ms.yaml
+++ b/templates/media/express.ms/media_express_ms.yaml
@@ -1,33 +1,33 @@
zabbix_export:
- version: '5.4'
- date: '2021-04-28T08:05:02Z'
+ version: '6.0'
+ date: '2022-06-09T11:50:44Z'
media_types:
-
name: Express.ms
type: WEBHOOK
parameters:
- 4:
+ -
name: event_source
value: '{EVENT.SOURCE}'
- 6:
+ -
name: event_update_status
value: '{EVENT.UPDATE.STATUS}'
- 5:
+ -
name: event_value
value: '{EVENT.VALUE}'
- 2:
+ -
name: express_message
value: '{ALERT.MESSAGE}'
- 1:
+ -
name: express_send_to
value: '{ALERT.SENDTO}'
- 3:
+ -
name: express_tags
value: '{EVENT.TAGSJSON}'
- 7:
+ -
name: express_token
value: '<PLACE BOT TOKEN>'
- 0:
+ -
name: express_url
value: '<PLACE INSTANCE URL>'
script: |
@@ -111,7 +111,7 @@ zabbix_export:
body: Express.params.message
}
};
- url = 'api/v3/botx/notification/callback/direct';
+ url = 'api/v4/botx/notifications/direct';
}
else {
data = {
diff --git a/templates/media/mattermost/README.md b/templates/media/mattermost/README.md
index 4d61425331a..3c5afcf9af4 100644
--- a/templates/media/mattermost/README.md
+++ b/templates/media/mattermost/README.md
@@ -2,7 +2,7 @@
# Mattermost webhook
![](images/logoHorizontal.png?raw=true)
-This guide describes how to integrate your Zabbix 4.4 installation with Mattermost using the Zabbix webhook feature. This guide will provide instructions on setting up a media type, a user and an action in Zabbix.
+This guide describes how to integrate your Zabbix 6.0 installation with Mattermost using the Zabbix webhook feature. This guide will provide instructions on setting up a media type, a user and an action in Zabbix.
## Setting up a Mattermost bot
@@ -54,7 +54,7 @@ This guide describes how to integrate your Zabbix 4.4 installation with Mattermo
* You can also choose between two notification modes:
- **alarm** (default)
- - Update messages will be attached as replies to Slack message thread
+ - Update messages will be attached as replies to Mattermost message thread
- Recovery message from Zabbix will update initial message
- **event**
- Recovery and update messages from Zabbix will be posted as new messages
@@ -85,4 +85,4 @@ For more information, use the [Zabbix](https://www.zabbix.com/documentation/6.0/
## Supported Versions
-Zabbix 4.4
+Zabbix 6.0
diff --git a/templates/media/msteams/media_msteams.yaml b/templates/media/msteams/media_msteams.yaml
index 02bd1581f7c..d128bde7897 100644
--- a/templates/media/msteams/media_msteams.yaml
+++ b/templates/media/msteams/media_msteams.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '5.4'
- date: '2020-10-16T09:33:49Z'
+ date: '2022-06-07T04:55:32Z'
media_types:
-
name: 'MS Teams'
@@ -144,7 +144,7 @@ zabbix_export:
facts = [],
body = {
themeColor: SEVERITY_COLORS[params.event_nseverity].replace('#', ''),
- summary: 'Zabbix',
+ summary: params.alert_subject,
sections: [
{
markdown: 'false',
diff --git a/templates/media/slack/README.md b/templates/media/slack/README.md
index eebd8cfc9f3..a7b7b5cede5 100644
--- a/templates/media/slack/README.md
+++ b/templates/media/slack/README.md
@@ -6,15 +6,15 @@ This guide describes how to integrate your Zabbix 6.0 and higher installation wi
## Setting up a Slack bot
-1\. On the page [Your Apps](https://api.slack.com/apps) press **Create New App** and specify its name and workspace.
+1\. On the page [Your Apps](https://api.slack.com/apps) press **Create an App**, select **From scratch** and specify its name and workspace.
2\. In the **Add features and functionality** section, select **Bots** and press **Review Scopes to Add**.
-3\. In the **Scopes** section, find **Scopes**, press **Add an OAuth Scope** and add **chat:write**, **im:write** and **groups:write** scopes.
+3\. In the **Scopes** section, find **Bot Token Scopes**, press **Add an OAuth Scope** and add **chat:write**, **im:write** and **groups:write** scopes.
-4\. Press **Install to Workspace** on the top of the page.
+4\. In the **Settings** section on the left side of the page press **Install App** and then **Install to Workspace**.
-5\. Now you have bot token, but you only need to use **Bot User OAuth Access Token**.
+5\. Press **Allow** and copy **Bot User OAuth Access Token**, which will be used to set up the webhook.
## Zabbix Webhook configuration
@@ -50,4 +50,4 @@ For more information, use the [Zabbix](https://www.zabbix.com/documentation/6.0/
## Supported Versions
-Zabbix 5.4
+Zabbix 6.0
diff --git a/templates/module/windows_agent/README.md b/templates/module/windows_agent/README.md
index 304d7803013..25217dcc3aa 100644
--- a/templates/module/windows_agent/README.md
+++ b/templates/module/windows_agent/README.md
@@ -369,7 +369,7 @@ No specific Zabbix configuration is required.
|Name|Description|Default|
|----|-----------|-------|
|{$SERVICE.NAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^.*$` |
-|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
+|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
|{$SERVICE.STARTUPNAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:automatic|automatic delayed)$` |
|{$SERVICE.STARTUPNAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:manual|disabled)$` |
diff --git a/templates/module/windows_agent/template_module_windows_agent.yaml b/templates/module/windows_agent/template_module_windows_agent.yaml
index 24c39970d4a..9f7dade67d0 100644
--- a/templates/module/windows_agent/template_module_windows_agent.yaml
+++ b/templates/module/windows_agent/template_module_windows_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-05-16T06:45:02Z'
+ date: '2022-06-01T13:18:45Z'
groups:
-
uuid: 57b7ae836ca64446ba2c296389c009b7
@@ -1782,7 +1782,7 @@ zabbix_export:
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.NAME.NOT_MATCHES}'
- value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
+ value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.STARTUPNAME.MATCHES}'
diff --git a/templates/module/windows_agent_active/README.md b/templates/module/windows_agent_active/README.md
index 4834ce96059..aeff0fd20b8 100644
--- a/templates/module/windows_agent_active/README.md
+++ b/templates/module/windows_agent_active/README.md
@@ -369,7 +369,7 @@ No specific Zabbix configuration is required.
|Name|Description|Default|
|----|-----------|-------|
|{$SERVICE.NAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^.*$` |
-|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
+|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
|{$SERVICE.STARTUPNAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:automatic|automatic delayed)$` |
|{$SERVICE.STARTUPNAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:manual|disabled)$` |
diff --git a/templates/module/windows_agent_active/template_module_windows_agent_active.yaml b/templates/module/windows_agent_active/template_module_windows_agent_active.yaml
index 6b883a7164f..d181b0d8a8a 100644
--- a/templates/module/windows_agent_active/template_module_windows_agent_active.yaml
+++ b/templates/module/windows_agent_active/template_module_windows_agent_active.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-05-16T06:44:49Z'
+ date: '2022-06-01T13:18:51Z'
groups:
-
uuid: 57b7ae836ca64446ba2c296389c009b7
@@ -1828,7 +1828,7 @@ zabbix_export:
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.NAME.NOT_MATCHES}'
- value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
+ value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.STARTUPNAME.MATCHES}'
diff --git a/templates/os/freebsd/template_os_freebsd.yaml b/templates/os/freebsd/template_os_freebsd.yaml
index 23de8fc2970..df279e1a80a 100644
--- a/templates/os/freebsd/template_os_freebsd.yaml
+++ b/templates/os/freebsd/template_os_freebsd.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-04-06T19:27:27Z'
+ date: '2022-06-06T06:58:28Z'
groups:
-
uuid: 846977d1dfed4968bc5f8bdb363285bc
@@ -467,6 +467,8 @@ zabbix_export:
key: 'vfs.file.cksum[/etc/passwd,sha256]'
delay: 1h
history: 1w
+ trends: '0'
+ value_type: CHAR
tags:
-
tag: component
diff --git a/templates/os/windows_agent/README.md b/templates/os/windows_agent/README.md
index 1aa5883a074..0a26b31932b 100644
--- a/templates/os/windows_agent/README.md
+++ b/templates/os/windows_agent/README.md
@@ -43,7 +43,7 @@ No specific Zabbix configuration is required.
|{$NET.IF.IFNAME.MATCHES} |<p>This macro is used in Network interface discovery. Can be overridden on the host or linked template level.</p> |`.*` |
|{$NET.IF.IFNAME.NOT_MATCHES} |<p>This macro is used in Network interface discovery. Can be overridden on the host or linked template level.</p> |`Miniport|Virtual|Teredo|Kernel|Loopback|Bluetooth|HTTPS|6to4|QoS|Layer` |
|{$SERVICE.NAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^.*$` |
-|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
+|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
|{$SERVICE.STARTUPNAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:automatic|automatic delayed)$` |
|{$SERVICE.STARTUPNAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:manual|disabled)$` |
|{$SWAP.PFREE.MIN.WARN} |<p>The warning threshold of the minimum free swap.</p> |`20` |
diff --git a/templates/os/windows_agent/template_os_windows_agent.yaml b/templates/os/windows_agent/template_os_windows_agent.yaml
index e54c8ff6ee2..0fcfc4b63b1 100644
--- a/templates/os/windows_agent/template_os_windows_agent.yaml
+++ b/templates/os/windows_agent/template_os_windows_agent.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-05-16T06:45:43Z'
+ date: '2022-06-01T13:19:02Z'
groups:
-
uuid: 846977d1dfed4968bc5f8bdb363285bc
@@ -1620,7 +1620,7 @@ zabbix_export:
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.NAME.NOT_MATCHES}'
- value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
+ value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.STARTUPNAME.MATCHES}'
diff --git a/templates/os/windows_agent_active/README.md b/templates/os/windows_agent_active/README.md
index 5b9daf7377d..09566934c7a 100644
--- a/templates/os/windows_agent_active/README.md
+++ b/templates/os/windows_agent_active/README.md
@@ -43,7 +43,7 @@ No specific Zabbix configuration is required.
|{$NET.IF.IFNAME.MATCHES} |<p>This macro is used in Network interface discovery. Can be overridden on the host or linked template level.</p> |`.*` |
|{$NET.IF.IFNAME.NOT_MATCHES} |<p>This macro is used in Network interface discovery. Can be overridden on the host or linked template level.</p> |`Miniport|Virtual|Teredo|Kernel|Loopback|Bluetooth|HTTPS|6to4|QoS|Layer` |
|{$SERVICE.NAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^.*$` |
-|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
+|{$SERVICE.NAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$` |
|{$SERVICE.STARTUPNAME.MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:automatic|automatic delayed)$` |
|{$SERVICE.STARTUPNAME.NOT_MATCHES} |<p>This macro is used in Service discovery. Can be overridden on the host or linked template level.</p> |`^(?:manual|disabled)$` |
|{$SWAP.PFREE.MIN.WARN} |<p>The warning threshold of the minimum free swap.</p> |`20` |
diff --git a/templates/os/windows_agent_active/template_os_windows_agent_active.yaml b/templates/os/windows_agent_active/template_os_windows_agent_active.yaml
index 3c20f5aedcf..879fe303398 100644
--- a/templates/os/windows_agent_active/template_os_windows_agent_active.yaml
+++ b/templates/os/windows_agent_active/template_os_windows_agent_active.yaml
@@ -1,6 +1,6 @@
zabbix_export:
version: '6.0'
- date: '2022-05-16T06:45:30Z'
+ date: '2022-06-01T13:19:08Z'
groups:
-
uuid: 846977d1dfed4968bc5f8bdb363285bc
@@ -1656,7 +1656,7 @@ zabbix_export:
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.NAME.NOT_MATCHES}'
- value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
+ value: '^(?:RemoteRegistry|MMCSS|gupdate|SysmonLog|clr_optimization_v.+|sppsvc|gpsvc|Pml Driver HPZ12|Net Driver HPZ12|MapsBroker|IntelAudioService|Intel\(R\) TPM Provisioning Service|dbupdate|DoSvc|CDPUserSvc_.+|WpnUserService_.+|OneSyncSvc_.+|WbioSrvc|BITS|tiledatamodelsvc|GISvc|ShellHWDetection|TrustedInstaller|TabletInputService|CDPSvc|wuauserv)$'
description: 'This macro is used in Service discovery. Can be overridden on the host or linked template level.'
-
macro: '{$SERVICE.STARTUPNAME.MATCHES}'
diff --git a/templates/san/hpe_msa2040_http/README.md b/templates/san/hpe_msa2040_http/README.md
new file mode 100644
index 00000000000..e76f83048c6
--- /dev/null
+++ b/templates/san/hpe_msa2040_http/README.md
@@ -0,0 +1,240 @@
+
+# HPE MSA 2040 Storage by HTTP
+
+## Overview
+
+For Zabbix version: 6.0 and higher
+The template to monitor HPE MSA 2040 by HTTP.
+It works without any external scripts and uses the script item.
+
+
+This template was tested on:
+
+- HPE MSA 2040 Storage
+
+## Setup
+
+> See [Zabbix template operation](https://www.zabbix.com/documentation/6.0/manual/config/templates_out_of_the_box/http) for basic instructions.
+
+1. Create user "zabbix" with monitor role on the storage.
+2. Link the template to a host.
+3. Configure {$HPE.MSA.API.PASSWORD} and an interface with address through which API is accessible.
+4. Change {$HPE.MSA.API.SCHEME} and {$HPE.MSA.API.PORT} macros if needed.
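+
+The following is a minimal, illustrative sketch of what the template's script item does with the macros above; it is not the template's actual script. The `/api/login/<sha256 hash>` endpoint, the `sessionKey` and `datatype: json` headers, the response layout, and the built-in `sha256()` helper are assumptions based on typical MSA firmware and similar script items — verify them against your array's API documentation and the template YAML.
+
+```js
+// Parameters are passed to a script item as a JSON string in `value`, e.g.
+// {"scheme":"https","host":"10.0.0.1","port":"443","username":"zabbix","password":"***"}.
+var params = JSON.parse(value),
+    base = params.scheme + '://' + params.host + ':' + params.port + '/api/',
+    request = new HttpRequest();
+
+// Ask the array to answer in JSON instead of XML (header name is an assumption).
+request.addHeader('datatype: json');
+
+// 1. Log in with a hash of "username_password" and keep the returned session key.
+var hash = sha256(params.username + '_' + params.password),  // sha256() assumed available in the script sandbox
+    login = JSON.parse(request.get(base + 'login/' + hash)),
+    sessionKey = login.status[0].response;                    // response layout is an assumption
+
+// 2. Reuse the session key for every subsequent request.
+request.addHeader('sessionKey: ' + sessionKey);
+var system = JSON.parse(request.get(base + 'show/system'));
+
+// The real script gathers many endpoints; here only the system block is returned
+// for the dependent items to parse with JSONPath.
+return JSON.stringify({system: system.system});
+```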
+
+
+## Zabbix configuration
+
+No specific Zabbix configuration is required.
+
+### Macros used
+
+|Name|Description|Default|
+|----|-----------|-------|
+|{$HPE.MSA.API.PASSWORD} |<p>Specify password for API.</p> |`` |
+|{$HPE.MSA.API.PORT} |<p>Connection port for API.</p> |`443` |
+|{$HPE.MSA.API.SCHEME} |<p>Connection scheme for API.</p> |`https` |
+|{$HPE.MSA.API.USERNAME} |<p>Specify user name for API.</p> |`zabbix` |
+|{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT} |<p>The critical threshold of the CPU utilization in %.</p> |`90` |
+|{$HPE.MSA.DATA.TIMEOUT} |<p>Response timeout for API.</p> |`30s` |
+|{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT} |<p>The critical threshold of the disk group space utilization in %.</p> |`90` |
+|{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN} |<p>The warning threshold of the disk group space utilization in %.</p> |`80` |
+|{$HPE.MSA.POOL.PUSED.MAX.CRIT} |<p>The critical threshold of the pool space utilization in %.</p> |`90` |
+|{$HPE.MSA.POOL.PUSED.MAX.WARN} |<p>The warning threshold of the pool space utilization in %.</p> |`80` |
+
+## Template links
+
+There are no template links in this template.
+
+## Discovery rules
+
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Controllers discovery |<p>Discover controllers.</p> |DEPENDENT |hpe.msa.controllers.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Disk groups discovery |<p>Discover disk groups.</p> |DEPENDENT |hpe.msa.disks.groups.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Disks discovery |<p>Discover disks.</p> |DEPENDENT |hpe.msa.disks.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Overrides:**</p><p>SSD life left<br> - {#TYPE} MATCHES_REGEX `8`<br> - ITEM_PROTOTYPE REGEXP `SSD life left` - DISCOVER</p> |
+|Enclosures discovery |<p>Discover enclosures.</p> |DEPENDENT |hpe.msa.enclosures.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Fans discovery |<p>Discover fans.</p> |DEPENDENT |hpe.msa.fans.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|FRU discovery |<p>Discover FRU.</p> |DEPENDENT |hpe.msa.frus.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p> <p>- {#TYPE} NOT_MATCHES_REGEX `^(POWER_SUPPLY|RAID_IOM|CHASSIS_MIDPLANE)$`</p> |
+|Pools discovery |<p>Discover pools.</p> |DEPENDENT |hpe.msa.pools.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Ports discovery |<p>Discover ports.</p> |DEPENDENT |hpe.msa.ports.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Power supplies discovery |<p>Discover power supplies.</p> |DEPENDENT |hpe.msa.power_supplies.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Volumes discovery |<p>Discover volumes.</p> |DEPENDENT |hpe.msa.volumes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+
+## Items collected
+
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|HPE |Get method errors |<p>A list of method errors from API requests.</p> |DEPENDENT |hpe.msa.data.errors<p>**Preprocessing**:</p><p>- JSONPATH: `$.['errors']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Product ID |<p>The product model identifier.</p> |DEPENDENT |hpe.msa.system.product_id<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['product-id']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System contact |<p>The name of the person who administers the system.</p> |DEPENDENT |hpe.msa.system.contact<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-contact']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System information |<p>A brief description of what the system is used for or how it is configured.</p> |DEPENDENT |hpe.msa.system.info<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-information']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System location |<p>The location of the system.</p> |DEPENDENT |hpe.msa.system.location<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-location']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System name |<p>The name of the storage system.</p> |DEPENDENT |hpe.msa.system.name<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-name']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Vendor name |<p>The vendor name.</p> |DEPENDENT |hpe.msa.system.vendor_name<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['vendor-name']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System health |<p>System health status.</p> |DEPENDENT |hpe.msa.system.health<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['health-numeric']`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p> |
+|HPE |HPE MSA: Service ping |<p>Check if HTTP/HTTPS service accepts TCP connections.</p> |SIMPLE |net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Firmware version |<p>Storage controller firmware version.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",firmware]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['sc-fw'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Part number |<p>Part number of the controller.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Serial number |<p>Storage controller serial number.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Health |<p>Controller health status.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Status |<p>Storage controller status.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Disks |<p>Number of disks in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",disks]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['disks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Pools |<p>Number of pools in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",pools]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-storage-pools'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Disk groups |<p>Number of disk groups in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",disk_groups]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['virtual-disks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IP address |<p>Controller network port IP address.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",ip_address]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['ip-address'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache memory size |<p>Controller cache memory size.</p> |DEPENDENT |hpe.msa.controllers.cache["{#CONTROLLER.ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['cache-memory-size'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write utilization |<p>Percentage of write cache in use, from 0 to 100.</p> |DEPENDENT |hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-used'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Read hits, rate |<p>For the controller that owns the volume, the number of times the block to be read is found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['read-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Read misses, rate |<p>For the controller that owns the volume, the number of times the block to be read is not found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['read-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write hits, rate |<p>For the controller that owns the volume, the number of times the block written to is found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write misses, rate |<p>For the controller that owns the volume, the number of times the block written to is not found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: CPU utilization |<p>Percentage of time the CPU is busy, from 0 to 100.</p> |DEPENDENT |hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['cpu-load'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, total rate |<p>Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.controllers.iops.total["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['iops'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.total["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Uptime |<p>Number of seconds since the controller was restarted.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",uptime]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['power-on-time'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Disks count |<p>Number of disks in the disk group.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",disk_count]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['diskcount'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Pool space used |<p>The percentage of pool capacity that the disk group occupies.</p> |DEPENDENT |hpe.msa.disks.groups.space["{#NAME}",pool_util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['pool-percentage'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Health |<p>Disk group health.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Space free |<p>The free space in the disk group.</p> |DEPENDENT |hpe.msa.disks.groups.space["{#NAME}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['freespace-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Disk group [{#NAME}]: Space total |<p>The capacity of the disk group.</p> |DEPENDENT |hpe.msa.disks.groups.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Disk group [{#NAME}]: Space utilization |<p>The space utilization percentage in the disk group.</p> |CALCULATED |hpe.msa.disks.groups.space["{#NAME}",util]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`100-last(//hpe.msa.disks.groups.space["{#NAME}",free])/last(//hpe.msa.disks.groups.space["{#NAME}",total])*100` |
+|HPE |Disk group [{#NAME}]: RAID type |<p>The RAID level of the disk group.</p> |DEPENDENT |hpe.msa.disks.groups.raid["{#NAME}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['raidtype-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk group [{#NAME}]: Status |<p>The status of the disk group:</p><p>- CRIT: Critical. The disk group is online but isn't fault tolerant because some of its disks are down.</p><p>- DMGD: Damaged. The disk group is online and fault tolerant, but some of its disks are damaged.</p><p>- FTDN: Fault tolerant with a down disk. The disk group is online and fault tolerant, but some of its disks are down.</p><p>- FTOL: Fault tolerant.</p><p>- MSNG: Missing. The disk group is online and fault tolerant, but some of its disks are missing.</p><p>- OFFL: Offline. Either the disk group is using offline initialization, or its disks are down and data may be lost.</p><p>- QTCR: Quarantined critical. The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online, or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p><p>- QTDN: Quarantined with a down disk. The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online, or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p><p>- QTOF: Quarantined offline. The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.</p><p>- QTUN: Quarantined unsupported. The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.</p><p>- STOP: The disk group is stopped.</p><p>- UNKN: Unknown.</p><p>- UP: Up. The disk group is online and does not have fault-tolerant attributes.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: IOPS, total rate |<p>Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.disks.groups.iops.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['iops'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Total |<p>Average response time for read and write operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Read |<p>Average response time for all read operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-read-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Write |<p>Average response time for all write operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-write-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.disks.groups.iops.read["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.disks.groups.iops.write["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Pool [{#NAME}]: Health |<p>Pool health.</p> |DEPENDENT |hpe.msa.pools["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Pool [{#NAME}]: Space free |<p>The free space in the pool.</p> |DEPENDENT |hpe.msa.pools.space["{#NAME}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['total-avail-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Pool [{#NAME}]: Space total |<p>The capacity of the pool.</p> |DEPENDENT |hpe.msa.pools.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['total-size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Pool [{#NAME}]: Space utilization |<p>The space utilization percentage in the pool.</p> |CALCULATED |hpe.msa.pools.space["{#NAME}",util]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`100-last(//hpe.msa.pools.space["{#NAME}",free])/last(//hpe.msa.pools.space["{#NAME}",total])*100` |
+|HPE |Volume [{#NAME}]: Space allocated |<p>The amount of space currently allocated to the volume.</p> |DEPENDENT |hpe.msa.volumes.space["{#NAME}",allocated]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes'][?(@['volume-name'] == "{#NAME}")].['allocated-size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Volume [{#NAME}]: Space total |<p>The capacity of the volume.</p> |DEPENDENT |hpe.msa.volumes.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes'][?(@['volume-name'] == "{#NAME}")].['size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Volume [{#NAME}]: IOPS, total rate |<p>Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.volumes.iops.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['iops'].first()`</p> |
+|HPE |Volume [{#NAME}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.volumes.iops.read["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.volumes.iops.write["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Read hits, rate |<p>For the controller that owns the volume, the number of times the block to be read is found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.read.hits["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['read-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Read misses, rate |<p>For the controller that owns the volume, the number of times the block to be read is not found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.read.misses["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['read-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Write hits, rate |<p>For the controller that owns the volume, the number of times the block written to is found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.write.hits["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['write-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Write misses, rate |<p>For the controller that owns the volume, the number of times the block written to is not found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.write.misses["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['write-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Health |<p>Enclosure health.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Status |<p>Enclosure status.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 6`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Midplane serial number |<p>Midplane serial number.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",midplane_serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['midplane-serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Part number |<p>Enclosure part number.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Model |<p>Enclosure model.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['model'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Power |<p>Enclosure power in watts.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",power]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['enclosure-power'].first()`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Health |<p>Power supply health status.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Status |<p>Power supply status.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Part number |<p>Power supply part number.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Serial number |<p>Power supply serial number.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Temperature |<p>Power supply temperature.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",temperature]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['dctemp'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NAME}]: Health |<p>Port health status.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NAME}]: Status |<p>Port status.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NAME}]: Type |<p>Port type.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['port-type-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Health |<p>Fan health status.</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Status |<p>Fan status.</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Speed |<p>Fan speed (revolutions per minute).</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",speed]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['speed'].first()`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Health |<p>Disk health status.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Temperature status |<p>Disk temperature status.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",temperature_status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['temperature-status-numeric'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- IN_RANGE: `1 3`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Temperature |<p>Temperature of the disk.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",temperature]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['temperature-numeric'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Type |<p>Disk type:</p><p>SAS: Enterprise SAS spinning disk.</p><p>SAS MDL: Midline SAS spinning disk.</p><p>SSD SAS: SAS solid-state disk.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['description-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Disk group |<p>If the disk is in a disk group, the disk group name.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",group]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['disk-group'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Storage pool |<p>If the disk is in a pool, the pool name.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",pool]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['storage-pool-name'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Vendor |<p>Disk vendor.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",vendor]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['vendor'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Model |<p>Disk model.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['model'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Serial number |<p>Disk serial number.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Space total |<p>Total size of the disk.</p> |DEPENDENT |hpe.msa.disks.space["{#DURABLE.ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `512`</p> |
+|HPE |Disk [{#DURABLE.ID}]: SSD life left |<p>The percentage of disk life remaining.</p> |DEPENDENT |hpe.msa.disks.ssd["{#DURABLE.ID}",life_left]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['ssd-life-left-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Status |<p>{#DESCRIPTION}. FRU status:</p><p>Absent: Component is not present.</p><p>Fault: At least one subcomponent has a fault.</p><p>Invalid data: For a power supply module, the EEPROM is improperly programmed.</p><p>OK: All subcomponents are operating normally.</p><p>Not available: Status is not available.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['fru-status'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Part number |<p>{#DESCRIPTION}. Part number of the FRU.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Serial number |<p>{#DESCRIPTION}. FRU serial number.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix raw items |HPE MSA: Get data |<p>The JSON with the results of API requests.</p> |SCRIPT |hpe.msa.data.get<p>**Expression**:</p>`The text is too long. Please see the template.` |
+
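+All of the dependent items above are populated from the single `hpe.msa.data.get` master item by Zabbix JSONPath preprocessing. The sketch below only illustrates how one such filter behaves: the payload fragment is hypothetical, and only the key names (`enclosures`, `durable-id`, `health-numeric`) come from the preprocessing steps in the table.
+
+```javascript
+// Hypothetical fragment of the master item JSON (key names from the template, values made up)
+var data = {
+    "enclosures": [
+        { "durable-id": "enclosure_1", "health-numeric": "0" },
+        { "durable-id": "enclosure_2", "health-numeric": "1" }
+    ]
+};
+
+// The enclosure health item prototype uses the Zabbix JSONPath
+//   $.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()
+// which, for {#DURABLE.ID} = "enclosure_1", behaves roughly like:
+var health = data.enclosures
+    .filter(function (e) { return e['durable-id'] === 'enclosure_1'; })
+    .map(function (e) { return e['health-numeric']; })[0];
+// health === "0"
+```
+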
+## Triggers
+
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|There are errors in method requests to API |<p>There are errors in method requests to API.</p> |`length(last(/HPE MSA 2040 Storage by HTTP/hpe.msa.data.errors))>0` |AVERAGE |<p>**Depends on**:</p><p>- Service is down or unavailable</p> |
+|System health is in degraded state |<p>System health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=1` |WARNING | |
+|System health is in fault state |<p>System health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=2` |AVERAGE | |
+|System health is in unknown state |<p>System health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=3` |INFO | |
+|Service is down or unavailable |<p>HTTP/HTTPS service is down or unable to establish TCP connection.</p> |`max(/HPE MSA 2040 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0` |HIGH | |
+|Controller [{#CONTROLLER.ID}]: Controller health is in degraded state |<p>Controller health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=1` |WARNING |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller health is in fault state |<p>Controller health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=2` |AVERAGE |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller health is in unknown state |<p>Controller health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=3` |INFO |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller is down |<p>The controller is down.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1` |HIGH | |
+|Controller [{#CONTROLLER.ID}]: High CPU utilization |<p>Controller CPU utilization is too high. The system might be slow to respond.</p> |`min(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util],5m)>{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}` |WARNING | |
+|Controller [{#CONTROLLER.ID}]: Controller has been restarted |<p>The controller uptime is less than 10 minutes.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",uptime])<10m` |WARNING | |
+|Disk group [{#NAME}]: Disk group health is in degraded state |<p>Disk group health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=1` |WARNING | |
+|Disk group [{#NAME}]: Disk group health is in fault state |<p>Disk group health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=2` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group health is in unknown state |<p>Disk group health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=3` |INFO | |
+|Disk group [{#NAME}]: Disk group space is low |<p>Disk group is running low on free space (more than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}% used).</p> |`min(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}` |WARNING |<p>**Depends on**:</p><p>- Disk group [{#NAME}]: Disk group space is critically low</p> |
+|Disk group [{#NAME}]: Disk group space is critically low |<p>Disk group is running low on free space (more than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}% used).</p> |`min(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is fault tolerant with a down disk |<p>The disk group is online and fault tolerant, but some of its disks are down.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=1` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group has damaged disks |<p>The disk group is online and fault tolerant, but some of its disks are damaged.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=9` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group has missing disks |<p>The disk group is online and fault tolerant, but some of its disks are missing.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=8` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is offline |<p>Either the disk group is using offline initialization, or its disks are down and data may be lost.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=3` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined critical |<p>The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=4` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined offline |<p>The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined unsupported |<p>The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined with an inaccessible disk |<p>The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disk comes online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=6` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is stopped |<p>The disk group is stopped.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=7` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group status is critical |<p>The disk group is online but isn't fault tolerant because some of its disks are down.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=2` |AVERAGE | |
+|Pool [{#NAME}]: Pool health is in degraded state |<p>Pool health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=1` |WARNING | |
+|Pool [{#NAME}]: Pool health is in fault state |<p>Pool health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=2` |AVERAGE | |
+|Pool [{#NAME}]: Pool health is in unknown state |<p>Pool health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=3` |INFO | |
+|Pool [{#NAME}]: Pool space is low |<p>Pool is running low on free space (more than {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}% used).</p> |`min(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}` |WARNING |<p>**Depends on**:</p><p>- Pool [{#NAME}]: Pool space is critically low</p> |
+|Pool [{#NAME}]: Pool space is critically low |<p>Pool is running low on free space (more than {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}% used).</p> |`min(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}` |AVERAGE | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in degraded state |<p>Enclosure health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=1` |WARNING | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in fault state |<p>Enclosure health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in unknown state |<p>Enclosure health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=3` |INFO | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has critical status |<p>Enclosure has critical status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=2` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has warning status |<p>Enclosure has warning status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=3` |WARNING | |
+|Enclosure [{#DURABLE.ID}]: Enclosure is unavailable |<p>Enclosure is unavailable.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=7` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure is unrecoverable |<p>Enclosure is unrecoverable.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=4` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has unknown status |<p>Enclosure has unknown status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=6` |INFO | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in degraded state |<p>Power supply health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=1` |WARNING | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in fault state |<p>Power supply health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in unknown state |<p>Power supply health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=3` |INFO | |
+|Power supply [{#DURABLE.ID}]: Power supply has error status |<p>Power supply has error status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=2` |AVERAGE | |
+|Power supply [{#DURABLE.ID}]: Power supply has warning status |<p>Power supply has warning status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=1` |WARNING | |
+|Power supply [{#DURABLE.ID}]: Power supply has unknown status |<p>Power supply has unknown status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=4` |INFO | |
+|Port [{#NAME}]: Port health is in degraded state |<p>Port health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=1` |WARNING | |
+|Port [{#NAME}]: Port health is in fault state |<p>Port health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=2` |AVERAGE | |
+|Port [{#NAME}]: Port health is in unknown state |<p>Port health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=3` |INFO | |
+|Port [{#NAME}]: Port has error status |<p>Port has error status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=2` |AVERAGE | |
+|Port [{#NAME}]: Port has warning status |<p>Port has warning status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=1` |WARNING | |
+|Port [{#NAME}]: Port has unknown status |<p>Port has unknown status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=4` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan health is in degraded state |<p>Fan health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=1` |WARNING | |
+|Fan [{#DURABLE.ID}]: Fan health is in fault state |<p>Fan health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Fan [{#DURABLE.ID}]: Fan health is in unknown state |<p>Fan health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=3` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan has error status |<p>Fan has error status.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=1` |AVERAGE | |
+|Fan [{#DURABLE.ID}]: Fan is missing |<p>Fan is missing.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=3` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan is off |<p>Fan is off.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=2` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk health is in degraded state |<p>Disk health is in degraded state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=1` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk health is in fault state |<p>Disk health is in fault state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Disk [{#DURABLE.ID}]: Disk health is in unknown state |<p>Disk health is in unknown state.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=3` |INFO | |
+|Disk [{#DURABLE.ID}]: Disk temperature is high |<p>Disk temperature is high.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=3` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk temperature is critically high |<p>Disk temperature is critically high.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=2` |AVERAGE | |
+|Disk [{#DURABLE.ID}]: Disk temperature is unknown |<p>Disk temperature is unknown.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=4` |INFO | |
+|FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU status is Degraded or Fault |<p>FRU status is Degraded or Fault.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=1` |AVERAGE | |
+|FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU ID data is invalid |<p>The FRU ID data is invalid. The FRU's EEPROM is improperly programmed.</p> |`last(/HPE MSA 2040 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=0` |WARNING | |
+
+## Feedback
+
+Please report any issues with the template at https://support.zabbix.com
+
+You can also provide feedback, discuss the template or ask for help with it at [ZABBIX forums](https://www.zabbix.com/forum/zabbix-suggestions-and-feedback).
+
diff --git a/templates/san/hpe_msa2040_http/template_san_hpe_msa2040_http.yaml b/templates/san/hpe_msa2040_http/template_san_hpe_msa2040_http.yaml
new file mode 100644
index 00000000000..e28b8ae6fd9
--- /dev/null
+++ b/templates/san/hpe_msa2040_http/template_san_hpe_msa2040_http.yaml
@@ -0,0 +1,4417 @@
+zabbix_export:
+ version: '6.0'
+ date: '2022-06-16T07:39:49Z'
+ groups:
+ -
+ uuid: 7c2cb727f85b492d88cd56e17127c64d
+ name: Templates/SAN
+ templates:
+ -
+ uuid: be10b1140fce4cc08247260b71bcd037
+ template: 'HPE MSA 2040 Storage by HTTP'
+ name: 'HPE MSA 2040 Storage by HTTP'
+ description: |
+ The template to monitor HPE MSA 2040 by HTTP.
+ It works without any external scripts and uses the script item.
+
+ Setup:
+ 1. Create user "zabbix" with monitor role on the storage.
+ 2. Link the template to a host.
+ 3. Configure {$HPE.MSA.API.PASSWORD} and an interface with the address through which the API is accessible.
+ 4. Change {$HPE.MSA.API.SCHEME} and {$HPE.MSA.API.PORT} macros if needed.
+
+ You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
+
+ Template tooling version used: 0.41
+ groups:
+ -
+ name: Templates/SAN
+ items:
+ -
+ uuid: 51d0ae1b4663471d868c27ccd2fb4fed
+ name: 'Get method errors'
+ type: DEPENDENT
+ key: hpe.msa.data.errors
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: TEXT
+ description: 'A list of method errors from API requests.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''errors'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: errors
+ triggers:
+ -
+ uuid: 7f80562a0b4f4329be454c418de3f517
+ expression: 'length(last(/HPE MSA 2040 Storage by HTTP/hpe.msa.data.errors))>0'
+ name: 'There are errors in method requests to API'
+ priority: AVERAGE
+ description: 'There are errors in method requests to API.'
+ dependencies:
+ -
+ name: 'Service is down or unavailable'
+ expression: 'max(/HPE MSA 2040 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: e07e09dbcdd44f509a06343c9a53a455
+ name: 'HPE MSA: Get data'
+ type: SCRIPT
+ key: hpe.msa.data.get
+ history: '0'
+ trends: '0'
+ value_type: TEXT
+ params: |
+ var params = JSON.parse(value),
+ fields = ['username', 'password', 'base_url'],
+ methods = [
+ 'system',
+ 'controllers',
+ 'controller-statistics',
+ 'frus',
+ 'disk-groups',
+ 'disk-group-statistics',
+ 'disks',
+ 'enclosures',
+ 'fans',
+ 'pools',
+ 'ports',
+ 'power-supplies',
+ 'volumes',
+ 'volume-statistics'
+ ],
+ data_tmp = {},
+ result_tmp = {},
+ session_key,
+ data = {};
+
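+ // Validate the required script parameters (username, password, base_url) supplied by the item.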
+ fields.forEach(function (field) {
+ if (typeof params !== 'object' || typeof params[field] === 'undefined' || params[field] === '' ) {
+ throw 'Required param is not set: "' + field + '".';
+ }
+ });
+
+ if (!params.base_url.endsWith('/')) {
+ params.base_url += '/';
+ }
+
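+ // Authenticate: request 'api/login/<md5("username_password")>' with the 'datatype: xml' header and parse the session key from the XML response.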
+ var response, request = new HttpRequest();
+ request.addHeader('datatype: xml');
+
+ auth_string = md5(params.username + '_' + params.password);
+ response = request.get(params.base_url + 'api/login/' + auth_string);
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ throw 'Authentication request has failed with status code ' + request.getStatus() + ': ' + response;
+ }
+
+ if (response !== null) {
+ try {
+ session_key = XML.query(response, '/RESPONSE/OBJECT/PROPERTY[@name="response"]/text()');
+ return_code = XML.query(response, '/RESPONSE/OBJECT/PROPERTY[@name="return-code"]/text()');
+ }
+ catch (error) {
+ throw 'Failed to parse authentication response received from device API.';
+ }
+ }
+
+ if (return_code != '1') {
+ throw 'Authentication failed.';
+ }
+ else if (session_key === '') {
+ throw 'Failed to retrieve session key from authentication response.';
+ }
+
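+ // Reuse the HTTP client with the obtained session key and switch the response format to 'api-embed' for the 'show' requests below.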
+ request.clearHeader();
+ request.addHeader('sessionKey: ' + session_key);
+ request.addHeader('datatype: api-embed');
+
+ data.errors = [];
+
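+ // Query 'api/show/<method>' for every method, convert the XML response to JSON and keep the data objects; per-method errors are collected instead of aborting the whole item.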
+ methods.forEach(function (method) {
+ response = request.get(params.base_url + 'api/show/' + method);
+ method_error = '';
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ method_error = 'Method: ' + method + '. Request has failed with status code ' + request.getStatus() + ': ' + response;
+ data.errors.push(method_error);
+ return;
+ }
+
+ if (response !== null) {
+ try {
+ result_tmp = JSON.parse(XML.toJson(response));
+ data[method] = [];
+
+ result_tmp.RESPONSE.OBJECT.forEach(function (object) {
+ var data_tmp = {};
+
+ if (object['@basetype'] !== 'status' && object['@basetype'] !== 'enclosure-sku') {
+ object.PROPERTY.forEach(function (property) {
+ name = property['@name'];
+ value = property['#text'] || '';
+ data_tmp[name] = value;
+ });
+
+ if (method == 'controller-statistics') {
+ data_tmp['durable-id'] = data_tmp['durable-id'].toLowerCase();
+ }
+
+ data[method].push(data_tmp);
+ }
+ });
+ }
+ catch (error) {
+ method_error = 'Method: ' + method + '. Failed to parse response received from device API.';
+ }
+ }
+ else {
+ method_error = 'Method: ' + method + '. No data received by request.';
+ }
+
+ if (method_error.length > 0) {
+ data.errors.push(method_error);
+ }
+ });
+
+ if (data.errors.length == 0) {
+ data.errors = '';
+ }
+
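+ // Log out ('api/exit') to close the API session explicitly.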
+ response = request.get(params.base_url + 'api/exit');
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ throw 'Logout request failed with status code ' + request.getStatus() + ': ' + response;
+ }
+
+ return JSON.stringify(data);
+ description: 'The JSON with the results of API requests.'
+ timeout: '{$HPE.MSA.DATA.TIMEOUT}'
+ parameters:
+ -
+ name: base_url
+ value: '{$HPE.MSA.API.SCHEME}://{HOST.CONN}:{$HPE.MSA.API.PORT}/'
+ -
+ name: username
+ value: '{$HPE.MSA.API.USERNAME}'
+ -
+ name: password
+ value: '{$HPE.MSA.API.PASSWORD}'
+ tags:
+ -
+ tag: component
+ value: raw
+ -
+ uuid: 802692ec1429407a8bbb55e338959c0b
+ name: 'System contact'
+ type: DEPENDENT
+ key: hpe.msa.system.contact
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The name of the person who administers the system.'
+ inventory_link: CONTACT
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-contact'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 4516edee03084515bcf139c22abc4c7c
+ name: 'System health'
+ type: DEPENDENT
+ key: hpe.msa.system.health
+ delay: '0'
+ history: 7d
+ description: 'System health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''health-numeric'']'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: system
+ triggers:
+ -
+ uuid: ee37a443b22a4161a88014a0c32dfdfa
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=1'
+ name: 'System health is in degraded state'
+ priority: WARNING
+ description: 'System health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 54472b6cdf84418baf10b4a7d5e16e5c
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=2'
+ name: 'System health is in fault state'
+ priority: AVERAGE
+ description: 'System health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: ccb821dafad1404dbc1873561a69b7cc
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.system.health)=3'
+ name: 'System health is in unknown state'
+ priority: INFO
+ description: 'System health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 6b82f7545a334f9cad752bd18f8886bc
+ name: 'System information'
+ type: DEPENDENT
+ key: hpe.msa.system.info
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'A brief description of what the system is used for or how it is configured.'
+ inventory_link: NOTES
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-information'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: c5f082947e844adbbcf2982ad9c0c76e
+ name: 'System location'
+ type: DEPENDENT
+ key: hpe.msa.system.location
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The location of the system.'
+ inventory_link: LOCATION
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-location'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 419165bfe80f46f7af1c5d6ab46c1f14
+ name: 'System name'
+ type: DEPENDENT
+ key: hpe.msa.system.name
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The name of the storage system.'
+ inventory_link: NAME
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-name'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 79c87a81895f46658f2e902cf7166860
+ name: 'Product ID'
+ type: DEPENDENT
+ key: hpe.msa.system.product_id
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The product model identifier.'
+ inventory_link: MODEL
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''product-id'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 947bb21483e747c9ad13b995b79289c0
+ name: 'Vendor name'
+ type: DEPENDENT
+ key: hpe.msa.system.vendor_name
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The vendor name.'
+ inventory_link: VENDOR
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''vendor-name'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: d1242f5aede14008ae6896123bb944a5
+ name: 'HPE MSA: Service ping'
+ type: SIMPLE
+ key: 'net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"]'
+ history: 7d
+ description: 'Check if HTTP/HTTPS service accepts TCP connections.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: network
+ triggers:
+ -
+ uuid: b8d07373a0fb4051a0534891b255994a
+ expression: 'max(/HPE MSA 2040 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0'
+ name: 'Service is down or unavailable'
+ priority: HIGH
+ description: 'HTTP/HTTPS service is down or unable to establish TCP connection.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ discovery_rules:
+ -
+ uuid: 66eabcbe564644dea3427afcbf76b87c
+ name: 'Controllers discovery'
+ type: DEPENDENT
+ key: hpe.msa.controllers.discovery
+ delay: '0'
+ description: 'Discover controllers.'
+ item_prototypes:
+ -
+ uuid: 53b0ea51add74c629814c881ac824d1b
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Read hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''read-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 23ed270bc823484cb514600bf23b2aa5
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Read misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''read-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 71a92c76ae7740cd9e58ea337f4a75e3
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: bafcf98cee9c4a8da0aea7b39a5242d4
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: fa9400f2dcba40f4b57dfcef6f7856a0
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write utilization'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'Percentage of write cache in use, from 0 to 100.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-used''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 38a6ca0447d548c593d08acf377250cb
+ name: 'Controller [{#CONTROLLER.ID}]: Cache memory size'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache["{#CONTROLLER.ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Controller cache memory size.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''cache-memory-size''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: cfff8c77d99440d18794e1c6dbf738ad
+ name: 'Controller [{#CONTROLLER.ID}]: CPU utilization'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'Percentage of time the CPU is busy, from 0 to 100.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''cpu-load''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: b94f1cfd6e6a48f8a18c644532b7a9c8
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util],5m)>{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}'
+ name: 'Controller [{#CONTROLLER.ID}]: High CPU utilization'
+ event_name: 'Controller [{#CONTROLLER.ID}]: High CPU utilization (over {$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}% for 5m)'
+ priority: WARNING
+ description: 'Controller CPU utilization is too high. The system might be slow to respond.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: c87dc81f4a3447f3962a69a8b0d79769
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 7c34d1c4fd784fb695d9fc7c5a686329
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.total["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 93b508f92de04dfbbfe7099bf37796ce
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 3d7f1a97cd8249efbabc2402006c1cc2
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 8bf0601293a64628be08d16391d1e11b
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.total["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 6444038b72294992ab17c126ccbe7251
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 5940d26205924a13ba351f5d56192fcb
+ name: 'Controller [{#CONTROLLER.ID}]: Disks'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",disks]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disks in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''disks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 94c2c9bfd2414875a53fbe94f6230666
+ name: 'Controller [{#CONTROLLER.ID}]: Disk groups'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",disk_groups]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disk groups in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''virtual-disks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 5a987843b14c4d25a1fde4429015f773
+ name: 'Controller [{#CONTROLLER.ID}]: Firmware version'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",firmware]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Storage controller firmware version.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''sc-fw''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 6d2a84b6b1804082ab4ef3451a52b552
+ name: 'Controller [{#CONTROLLER.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Controller health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: component
+ value: health
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 381a5fe2adfd4f4ea15763cdf0a1bd0d
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=1'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in degraded state'
+ priority: WARNING
+ description: 'Controller health is in degraded state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 2082d12ff9c54a5ea709dba05c14ae00
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=2'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in fault state'
+ priority: AVERAGE
+ description: 'Controller health is in fault state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 0b2ed99c47a64210b198cc0a3a6b84b5
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=3'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in unknown state'
+ priority: INFO
+ description: 'Controller health is in unknown state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 5f00490ddd22458b93add06ed24a9f96
+ name: 'Controller [{#CONTROLLER.ID}]: IP address'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",ip_address]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Controller network port IP address.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''ip-address''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 33e754d5acb84b7c86b2e23b122e6eed
+ name: 'Controller [{#CONTROLLER.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Part number of the controller.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: e4930566c3844f9487e343c203f3eb96
+ name: 'Controller [{#CONTROLLER.ID}]: Pools'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",pools]'
+ delay: '0'
+ history: 7d
+ description: 'Number of pools in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-storage-pools''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: c073adb77eb84cf79e1e1693d9378d47
+ name: 'Controller [{#CONTROLLER.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Storage controller serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: a2be1b4b814d45b18bb4e313818511d6
+ name: 'Controller [{#CONTROLLER.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Storage controller status.'
+ valuemap:
+ name: 'Controller status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: component
+ value: health
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 1524e80a37cb4b64a7360488e132a433
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ priority: HIGH
+ description: 'The controller is down.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: df2bede9ea85483581a35a45a15d4de4
+ name: 'Controller [{#CONTROLLER.ID}]: Uptime'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",uptime]'
+ delay: '0'
+ history: 7d
+ units: uptime
+ description: 'Number of seconds since the controller was restarted.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''power-on-time''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 136bb1ccd4114a529a99ddbf803fd974
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",uptime])<10m'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller has been restarted'
+ event_name: 'Controller [{#CONTROLLER.ID}]: Controller has been restarted (uptime < 10m)'
+ priority: WARNING
+ description: 'The controller uptime is less than 10 minutes.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ graph_prototypes:
+ -
+ uuid: 93aeac1a193e43d3a93a3892bd26b0ff
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]'
+ -
+ uuid: a7432b24cd834aa0be9dec3935641dfb
+ name: 'Controller [{#CONTROLLER.ID}]: Cache usage'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]'
+ -
+ uuid: fca4007d4dd1491dbceba1644b50e1b5
+ name: 'Controller [{#CONTROLLER.ID}]: Controller CPU utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]'
+ -
+ uuid: 0b2598db582546308d092c9e7889e698
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]'
+ -
+ uuid: 0793bb861e874a2c8e7e60a4c40bc34e
+ name: 'Controller [{#CONTROLLER.ID}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#CONTROLLER.ID}'
+ path: '$.[''controller-id'']'
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 16b9a9b6da11463d865cb2b59f77f376
+ name: 'Disks discovery'
+ type: DEPENDENT
+ key: hpe.msa.disks.discovery
+ delay: '0'
+ description: 'Discover disks.'
+ item_prototypes:
+ -
+ uuid: 60418ff95d2b4ac698fe041647656005
+ name: 'Disk [{#DURABLE.ID}]: Space total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.space["{#DURABLE.ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total size of the disk.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 579f29536b0740b9887cbb0863bd3e45
+ name: 'Disk [{#DURABLE.ID}]: SSD life left'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.ssd["{#DURABLE.ID}",life_left]'
+ delay: '0'
+ history: 7d
+ discover: NO_DISCOVER
+ units: '%'
+ description: 'The percentage of disk life remaining.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''ssd-life-left-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: a430bd06d24447649687dc9b9c3dee2c
+ name: 'Disk [{#DURABLE.ID}]: Disk group'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",group]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'If the disk is in a disk group, the disk group name.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''disk-group''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 17f4069e731b45c7a9d9bfc5786a07fc
+ name: 'Disk [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Disk health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: health
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 58d2da30bfe74d05ad05e0b286fe0fae
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=1'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in degraded state'
+ priority: WARNING
+ description: 'Disk health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 1f0e81d23e1e423ba885425f33773f5b
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=2'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in fault state'
+ priority: AVERAGE
+ description: 'Disk health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: dc75dd0456a145b3ab0646c9403caeb6
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=3'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in unknown state'
+ priority: INFO
+ description: 'Disk health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 689e29b31fd0490fb26920c04d094136
+ name: 'Disk [{#DURABLE.ID}]: Model'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk model.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''model''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 20d37295acce41acac8ba77962130774
+ name: 'Disk [{#DURABLE.ID}]: Storage pool'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",pool]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'If the disk is in a pool, the pool name.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''storage-pool-name''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 7c4da69f28824444960e6783fe090526
+ name: 'Disk [{#DURABLE.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 770749eafc79429185e7127d95b1ff74
+ name: 'Disk [{#DURABLE.ID}]: Temperature'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",temperature]'
+ delay: '0'
+ history: 7d
+ units: '!°C'
+ description: 'Temperature of the disk.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''temperature-numeric''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 5ba57b2f4d014b2a81c546e8f74a133e
+ name: 'Disk [{#DURABLE.ID}]: Temperature status'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",temperature_status]'
+ delay: '0'
+ history: 7d
+ description: 'Disk temperature status.'
+ valuemap:
+ name: 'Disk temperature status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''temperature-status-numeric''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: IN_RANGE
+ parameters:
+ - '1'
+ - '3'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: health
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: b194f7b133274552823b66e44c88bd02
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=2'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is critically high'
+ priority: AVERAGE
+ description: 'Disk temperature is critically high.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: aaabacd5f5194378b6c8388e2ef90abe
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=3'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is high'
+ priority: WARNING
+ description: 'Disk temperature is high.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 60d0fc661aa140798f937a63fdd6e5f9
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=4'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is unknown'
+ priority: INFO
+ description: 'Disk temperature is unknown.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: d781943c08d24556a083a16cca34ad58
+ name: 'Disk [{#DURABLE.ID}]: Type'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",type]'
+ delay: '0'
+ history: 7d
+ description: |
+ Disk type:
+ SAS: Enterprise SAS spinning disk.
+ SAS MDL: Midline SAS spinning disk.
+ SSD SAS: SAS solid-state disk.
+ valuemap:
+ name: 'Disk type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''description-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 86ce9f4d139e46908750d158b004b517
+ name: 'Disk [{#DURABLE.ID}]: Vendor'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",vendor]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk vendor.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''vendor''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ -
+ lld_macro: '{#TYPE}'
+ path: '$.[''description-numeric'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ overrides:
+ -
+ name: 'SSD life left'
+ step: '1'
+ filter:
+ conditions:
+ -
+ macro: '{#TYPE}'
+ value: '8'
+ formulaid: A
+ operations:
+ -
+ operationobject: ITEM_PROTOTYPE
+ operator: REGEXP
+ value: 'SSD life left'
+ status: ENABLED
+ discover: DISCOVER
+ -
+ uuid: dd952ff876134376baef061dc260884c
+ name: 'Disk groups discovery'
+ type: DEPENDENT
+ key: hpe.msa.disks.groups.discovery
+ delay: '0'
+ description: 'Discover disk groups.'
+ item_prototypes:
+ -
+ uuid: 5b0b3db4bdff429996111d566b6d0386
+ name: 'Disk group [{#NAME}]: Average response time: Read'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for all read operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-read-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 4a4fb1ae86df4607882de9c9d40f51f4
+ name: 'Disk group [{#NAME}]: Average response time: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for read and write operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: a93c1e1b1eee496d861464128aaefa57
+ name: 'Disk group [{#NAME}]: Average response time: Write'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for all write operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-write-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 46ba55c8ec2e4811b254441f22ead159
+ name: 'Disk group [{#NAME}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: b1e2347ea10b4e84bb227668f5560b14
+ name: 'Disk group [{#NAME}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: a3df11b895fa425799c34516050000bd
+ name: 'Disk group [{#NAME}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 18cd4383127548b68313184a2b94750f
+ name: 'Disk group [{#NAME}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.read["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 044e291ab66d48dcb8b66ee18f638702
+ name: 'Disk group [{#NAME}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 66ec5badb1d2491d9e07b5ce45486d72
+ name: 'Disk group [{#NAME}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.write["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 5356a1f819a54c59bb3765d99a965537
+ name: 'Disk group [{#NAME}]: RAID type'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.raid["{#NAME}",type]'
+ delay: '0'
+ history: 7d
+ description: 'The RAID level of the disk group.'
+ valuemap:
+ name: 'RAID type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''raidtype-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: b1c95904002b4c17a1c007c664fa4ff8
+ name: 'Disk group [{#NAME}]: Space free'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.space["{#NAME}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The free space in the disk group.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''freespace-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: bfe1a64952754488898798f5f07e24b1
+ name: 'Disk group [{#NAME}]: Pool space used'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.space["{#NAME}",pool_util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'The percentage of pool capacity that the disk group occupies.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''pool-percentage''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 29eae883b9fc4e2191daa870bd9d58ad
+ name: 'Disk group [{#NAME}]: Space total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.space["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The capacity of the disk group.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 760b63c8140544dd8af0de8fd873c8cb
+ name: 'Disk group [{#NAME}]: Space utilization'
+ type: CALCULATED
+ key: 'hpe.msa.disks.groups.space["{#NAME}",util]'
+ history: 7d
+ value_type: FLOAT
+ units: '%'
+ params: '100-last(//hpe.msa.disks.groups.space["{#NAME}",free])/last(//hpe.msa.disks.groups.space["{#NAME}",total])*100'
+ description: 'The space utilization percentage in the disk group.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: d6494d79dae94aeda2b78169f8960224
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}'
+ name: 'Disk group [{#NAME}]: Disk group space is critically low'
+ event_name: 'Disk group [{#NAME}]: Disk group space is critically low (used > {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}%)'
+ priority: AVERAGE
+ description: 'Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}% available).'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: ea04be93082640709ec6e58ae640575c
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}'
+ name: 'Disk group [{#NAME}]: Disk group space is low'
+ event_name: 'Disk group [{#NAME}]: Disk group space is low (used > {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}%)'
+ priority: WARNING
+ description: 'Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}% available).'
+ dependencies:
+ -
+ name: 'Disk group [{#NAME}]: Disk group space is critically low'
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 085fae4f87444b62ae5c52703176a533
+ name: 'Disk group [{#NAME}]: Disks count'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",disk_count]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disks in the disk group.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''diskcount''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 1c714d46a3ae4e77b4a2e155c047e630
+ name: 'Disk group [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Disk group health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: component
+ value: health
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: ad99b0f4a6b14b1d9819ab63376e11e7
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=1'
+ name: 'Disk group [{#NAME}]: Disk group health is in degraded state'
+ priority: WARNING
+ description: 'Disk group health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 28f69da63b024079b8953165da6cbfdc
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=2'
+ name: 'Disk group [{#NAME}]: Disk group health is in fault state'
+ priority: AVERAGE
+ description: 'Disk group health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 94695c4222c94bd1b12d9ecb4b21e628
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=3'
+ name: 'Disk group [{#NAME}]: Disk group health is in unknown state'
+ priority: INFO
+ description: 'Disk group health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 27ad0ae81baa43528cf94d3ccc5c3ec3
+ name: 'Disk group [{#NAME}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",status]'
+ delay: '0'
+ history: 7d
+ description: |
+ The status of the disk group:
+
+ - CRIT: Critical. The disk group is online but isn't fault tolerant because some of its disks are down.
+ - DMGD: Damaged. The disk group is online and fault tolerant, but some of its disks are damaged.
+ - FTDN: Fault tolerant with a down disk. The disk group is online and fault tolerant, but some of its disks are down.
+ - FTOL: Fault tolerant.
+ - MSNG: Missing. The disk group is online and fault tolerant, but some of its disks are missing.
+ - OFFL: Offline. Either the disk group is using offline initialization, or its disks are down and data may be lost.
+ - QTCR: Quarantined critical. The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.
+ - QTDN: Quarantined with a down disk. The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.
+ - QTOF: Quarantined offline. The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.
+ - QTUN: Quarantined unsupported. The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.
+ - STOP: The disk group is stopped.
+ - UNKN: Unknown.
+ - UP: Up. The disk group is online and does not have fault-tolerant attributes.
+ valuemap:
+ name: 'Disk group status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: component
+ value: health
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 9bbf1f8a67564b769db5921a2023defd
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=9'
+ name: 'Disk group [{#NAME}]: Disk group has damaged disks'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are damaged.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: d4d5a63b514d4f1aaff9e8c68db9026e
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=8'
+ name: 'Disk group [{#NAME}]: Disk group has missing disks'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are missing.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 26b5b53b33c940d5a642ea13d670bf55
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=1'
+ name: 'Disk group [{#NAME}]: Disk group is fault tolerant with a down disk'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are down.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: b1e7e080f7264ae0be323a500abc211f
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=3'
+ name: 'Disk group [{#NAME}]: Disk group is offline'
+ priority: AVERAGE
+ description: 'Either the disk group is using offline initialization, or its disks are down and data may be lost.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: b8b5b248c275453d91c214c19d01f5d9
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=4'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined critical'
+ priority: AVERAGE
+ description: 'The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: bc1c2bbfffd541998099e695f9c98386
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined offline'
+ priority: AVERAGE
+ description: 'The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 7c4981f0b0fb4a3891b8a410501224d0
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined unsupported'
+ priority: AVERAGE
+ description: 'The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 9e762711ecf54f8691e6be32a3e92738
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=6'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined with an inaccessible disk'
+ priority: AVERAGE
+ description: 'The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 191dbf4bdd294add8ed0815c21f6eadb
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=7'
+ name: 'Disk group [{#NAME}]: Disk group is stopped'
+ priority: AVERAGE
+ description: 'The disk group is stopped.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 05480e1bc3ff4e7a8c5a20286d6f306c
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=2'
+ name: 'Disk group [{#NAME}]: Disk group status is critical'
+ priority: AVERAGE
+ description: 'The disk group is online but isn''t fault tolerant because some of its disks are down.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ graph_prototypes:
+ -
+ uuid: 1d5b8a7246a845678a938da75b7e32cc
+ name: 'Disk group [{#NAME}]: Average response time'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]'
+ -
+ uuid: b718bd4950f64abb892ba3bfe738ad49
+ name: 'Disk group [{#NAME}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]'
+ -
+ uuid: 55d7871c891446b086860f8c861fc3f7
+ name: 'Disk group [{#NAME}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.iops.read["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.iops.write["{#NAME}",rate]'
+ -
+ uuid: 234be7ebf50e42f6a098662f1fffba03
+ name: 'Disk group [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.space["{#NAME}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: c6713507122242988dc9fae6e77bdff6
+ name: 'Enclosures discovery'
+ type: DEPENDENT
+ key: hpe.msa.enclosures.discovery
+ delay: '0'
+ description: 'Discover enclosures.'
+ item_prototypes:
+ -
+ uuid: 806b44d4f2dd44eea6db7e982c5fea16
+ name: 'Enclosure [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Enclosure health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: component
+ value: health
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 934c5f9e2d19499fab1d88ff9a36c9c9
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=1'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in degraded state'
+ priority: WARNING
+ description: 'Enclosure health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 3d06d5ce761c42e983a5eec029bb671e
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=2'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in fault state'
+ priority: AVERAGE
+ description: 'Enclosure health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: df1275bd16434b1ca77749930e1af3f8
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=3'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in unknown state'
+ priority: INFO
+ description: 'Enclosure health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 42987ecd83d74ffa91a8da7d72aacdb0
+ name: 'Enclosure [{#DURABLE.ID}]: Midplane serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",midplane_serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Midplane serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''midplane-serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 10fff6e5bc2143348c3b0c6a3eb87631
+ name: 'Enclosure [{#DURABLE.ID}]: Model'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Enclosure model.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''model''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: f9279641e2cb4c95a07d43ef1f1caba5
+ name: 'Enclosure [{#DURABLE.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Enclosure part number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: cd0ec35c114b41579d0dfcebdc5e7211
+ name: 'Enclosure [{#DURABLE.ID}]: Power'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",power]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: W
+ description: 'Enclosure power in watts.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''enclosure-power''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 98205e12a4c44a35a59879da5cc9f39c
+ name: 'Enclosure [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Enclosure status.'
+ valuemap:
+ name: 'Enclosure status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '6'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: component
+ value: health
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: db8329f956d94e74bb6379b29a000bf0
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=2'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has critical status'
+ priority: HIGH
+ description: 'Enclosure has critical status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 6a32b4a08bfb49939633b42a16041c7f
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=6'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has unknown status'
+ priority: INFO
+ description: 'Enclosure has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 2fd78acd77804a1f8a474c973bf5c93e
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=3'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has warning status'
+ priority: WARNING
+ description: 'Enclosure has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 8cecd9a3ecf14931b8b3ccffff4a4615
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=7'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure is unavailable'
+ priority: HIGH
+ description: 'Enclosure is unavailable.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 458cfb2a9dfb476dae940b66342b12bf
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=4'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure is unrecoverable'
+ priority: HIGH
+ description: 'Enclosure is unrecoverable.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 6900c1efa2b3456ead4ae5e5a033700e
+ name: 'Fans discovery'
+ type: DEPENDENT
+ key: hpe.msa.fans.discovery
+ delay: '0'
+ description: 'Discover fans.'
+ item_prototypes:
+ -
+ uuid: b4732ef73f0e4fcc9458797b28e2b829
+ name: 'Fan [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Fan health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: component
+ value: health
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 377a9c494a5443c0ba694ab78683da17
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=1'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in degraded state'
+ priority: WARNING
+ description: 'Fan health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 4446cef7b06140e3a29018944201ebd7
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=2'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in fault state'
+ priority: AVERAGE
+ description: 'Fan health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 3273a1f3595046e69ef6c74ac6f56eeb
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=3'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in unknown state'
+ priority: INFO
+ description: 'Fan health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: eb7057d0b65e40138899753b06abfb68
+ name: 'Fan [{#DURABLE.ID}]: Speed'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",speed]'
+ delay: '0'
+ history: 7d
+ units: '!RPM'
+ description: 'Fan speed (revolutions per minute).'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''speed''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 45f948cb8f484367a7a5735beb796a1b
+ name: 'Fan [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Fan status.'
+ valuemap:
+ name: 'Fan status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: component
+ value: health
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: f8afe70029aa4cdfb1f68452eea27986
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=1'
+ name: 'Fan [{#DURABLE.ID}]: Fan has error status'
+ priority: AVERAGE
+ description: 'Fan has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 8ad445006c51474fbee30a70971a97a5
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=3'
+ name: 'Fan [{#DURABLE.ID}]: Fan is missing'
+ priority: INFO
+ description: 'Fan is missing.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: fabe4e0bde194675a089db45125428b6
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=2'
+ name: 'Fan [{#DURABLE.ID}]: Fan is off'
+ priority: WARNING
+ description: 'Fan is off.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ graph_prototypes:
+ -
+ uuid: 44c2c9cdec6247cf8f4d0e2bd7e0e372
+ name: 'Fan [{#DURABLE.ID}]: Speed'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.fans["{#DURABLE.ID}",speed]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: ec7d856fd690401888f93f8d9c135828
+ name: 'FRU discovery'
+ type: DEPENDENT
+ key: hpe.msa.frus.discovery
+ delay: '0'
+ filter:
+ conditions:
+ -
+ macro: '{#TYPE}'
+ value: ^(POWER_SUPPLY|RAID_IOM|CHASSIS_MIDPLANE)$
+ operator: NOT_MATCHES_REGEX
+ formulaid: A
+ description: 'Discover FRU.'
+ item_prototypes:
+ -
+ uuid: 77df1d8bfba9428e887025a05f02f306
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: '{#DESCRIPTION}. Part number of the FRU.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ -
+ uuid: 04fc08de0c3947cba0c8f6c633ae3157
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: '{#DESCRIPTION}. FRU serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ -
+ uuid: ef3acb289f9c4a8e919b136dabf7b5c5
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status]'
+ delay: '0'
+ history: 7d
+ description: |
+ {#DESCRIPTION}. FRU status:
+
+ Absent: Component is not present.
+ Fault: At least one subcomponent has a fault.
+ Invalid data: For a power supply module, the EEPROM is improperly programmed.
+ OK: All subcomponents are operating normally.
+ Not available: Status is not available.
+ valuemap:
+ name: 'FRU status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''fru-status''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: JAVASCRIPT
+ parameters:
+ - |
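+ // Map the textual fru-status value to the numeric codes used by the 'FRU status' valuemap
+ // (Absent=2, Fault=1, Invalid Data=0, OK=4, Not Available=5); any other value falls through to 6.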
+ if (value == 'Absent') {
+ return 2;
+ }
+ else if (value == 'Fault') {
+ return 1;
+ }
+ else if (value == 'Invalid Data') {
+ return 0;
+ }
+ else if (value == 'OK') {
+ return 4;
+ }
+ else if (value == 'Not Available') {
+ return 5;
+ }
+ return 6;
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: component
+ value: health
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ trigger_prototypes:
+ -
+ uuid: 8182ee0edeb94f4a845c7eda047718c8
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=0'
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU ID data is invalid'
+ priority: WARNING
+ description: 'The FRU ID data is invalid. The FRU''s EEPROM is improperly programmed.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 8bef225423a548c3a289c67c40ffd906
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=1'
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU status is Degraded or Fault'
+ priority: AVERAGE
+ description: 'FRU status is Degraded or Fault.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DESCRIPTION}'
+ path: '$.[''description'']'
+ -
+ lld_macro: '{#ENCLOSURE.ID}'
+ path: '$.[''enclosure-id'']'
+ -
+ lld_macro: '{#LOCATION}'
+ path: '$.[''fru-location'']'
+ -
+ lld_macro: '{#TYPE}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 082c1cfb851548928911b9ab69f6f75e
+ name: 'Pools discovery'
+ type: DEPENDENT
+ key: hpe.msa.pools.discovery
+ delay: '0'
+ description: 'Discover pools.'
+ item_prototypes:
+ -
+ uuid: 2a8b8ebd3bbb4e4e851602e1a84bb0da
+ name: 'Pool [{#NAME}]: Space free'
+ type: DEPENDENT
+ key: 'hpe.msa.pools.space["{#NAME}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The free space in the pool.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''total-avail-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: 0518c9f95bad4208ba33def89432975d
+ name: 'Pool [{#NAME}]: Space total'
+ type: DEPENDENT
+ key: 'hpe.msa.pools.space["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The capacity of the pool.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''total-size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: cc361d77ac8046fc833db41fbd5d2cd3
+ name: 'Pool [{#NAME}]: Space utilization'
+ type: CALCULATED
+ key: 'hpe.msa.pools.space["{#NAME}",util]'
+ history: 7d
+ value_type: FLOAT
+ units: '%'
+ params: '100-last(//hpe.msa.pools.space["{#NAME}",free])/last(//hpe.msa.pools.space["{#NAME}",total])*100'
+ description: 'The space utilization percentage in the pool.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 042ac4fedb00485c8c6f48016182b9dd
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}'
+ name: 'Pool [{#NAME}]: Pool space is critically low'
+ event_name: 'Pool [{#NAME}]: Pool space is critically low (used > {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}%)'
+ priority: AVERAGE
+ description: 'Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}% available).'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: f4c7a9ed832d4668be64acf9da3c9814
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}'
+ name: 'Pool [{#NAME}]: Pool space is low'
+ event_name: 'Pool [{#NAME}]: Pool space is low (used > {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}%)'
+ priority: WARNING
+ description: 'Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}% available).'
+ dependencies:
+ -
+ name: 'Pool [{#NAME}]: Pool space is critically low'
+ expression: 'min(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 4b79ed6e64cc484bb69f3677cd7932ef
+ name: 'Pool [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.pools["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Pool health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 7af3ccbf497c44adb907b6d15ecebe33
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=1'
+ name: 'Pool [{#NAME}]: Pool health is in degraded state'
+ priority: WARNING
+ description: 'Pool health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 093fa03c7c3f4ac4adbd3234bf6007a0
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=2'
+ name: 'Pool [{#NAME}]: Pool health is in fault state'
+ priority: AVERAGE
+ description: 'Pool health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 2af3a0092d57420c95bf82adc39eae5f
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=3'
+ name: 'Pool [{#NAME}]: Pool health is in unknown state'
+ priority: INFO
+ description: 'Pool [{#NAME}] health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ graph_prototypes:
+ -
+ uuid: 001c0a805d3a40bf86632b498883519d
+ name: 'Pool [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.pools.space["{#NAME}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.pools.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 09754bd16c674ff08fad52f060035961
+ name: 'Ports discovery'
+ type: DEPENDENT
+ key: hpe.msa.ports.discovery
+ delay: '0'
+ description: 'Discover ports.'
+ item_prototypes:
+ -
+ uuid: 27564169c2b04cba924162a5630bbd4b
+ name: 'Port [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Port health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 266d310dc71e4c60977668e330eec8df
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=1'
+ name: 'Port [{#NAME}]: Port health is in degraded state'
+ priority: WARNING
+ description: 'Port health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 19a02ecfb5d242ff85e233961cc4a384
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=2'
+ name: 'Port [{#NAME}]: Port health is in fault state'
+ priority: AVERAGE
+ description: 'Port health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 8461e41fdd2944f08d3b95c63df0fa9f
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=3'
+ name: 'Port [{#NAME}]: Port health is in unknown state'
+ priority: INFO
+ description: 'Port health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 57986481099a4bffb5b61816e1ba4110
+ name: 'Port [{#NAME}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Port status.'
+ valuemap:
+ name: Status
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 7ff86d50c241496d9bfa54359e17222e
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=2'
+ name: 'Port [{#NAME}]: Port has error status'
+ priority: AVERAGE
+ description: 'Port has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: bdad67d08b92447e9964ea6362c0989c
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=4'
+ name: 'Port [{#NAME}]: Port has unknown status'
+ priority: INFO
+ description: 'Port has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 95ba19413bca495aba96f32fa91bc54b
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=1'
+ name: 'Port [{#NAME}]: Port has warning status'
+ priority: WARNING
+ description: 'Port has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: b1240a5950a3466b9d0725729bef3a03
+ name: 'Port [{#NAME}]: Type'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",type]'
+ delay: '0'
+ history: 7d
+ description: 'Port type.'
+ valuemap:
+ name: 'Port type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''port-type-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''port'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 2cf7945eea95414a88ce572f4c075bb1
+ name: 'Power supplies discovery'
+ type: DEPENDENT
+ key: hpe.msa.power_supplies.discovery
+ delay: '0'
+ description: 'Discover power supplies.'
+ item_prototypes:
+ -
+ uuid: 4e4f593738fb451cbfd1589a3054387e
+ name: 'Power supply [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Power supply health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 2394f69a635a4072bd96494b8df8ae3e
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=1'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in degraded state'
+ priority: WARNING
+ description: 'Power supply health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: f390553cfe4646e0ab9a4fd9cab20886
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=2'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in fault state'
+ priority: AVERAGE
+ description: 'Power supply health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 9499fbdcc6a946138fb6cd69d8be9a00
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=3'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in unknown state'
+ priority: INFO
+ description: 'Power supply health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 1b72c54bff3a4b129e959db43e895839
+ name: 'Power supply [{#DURABLE.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Power supply part number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ -
+ uuid: bdbf30f2e70d427bb9237b941fed5941
+ name: 'Power supply [{#DURABLE.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Power supply serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 110fa50ee1d64ecdb064d3bd7b34dc90
+ name: 'Power supply [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Power supply status.'
+ valuemap:
+ name: Status
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 28896e70b14f463aae8c8af4786e52ff
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=2'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has error status'
+ priority: AVERAGE
+ description: 'Power supply has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: ac6b0d55fbac4f338261f6a90b68e5b0
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=4'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has unknown status'
+ priority: INFO
+ description: 'Power supply has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: c9cddccdeed34aa4a533f0ad07aab5ae
+ expression: 'last(/HPE MSA 2040 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=1'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has warning status'
+ priority: WARNING
+ description: 'Power supply has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 8b4399f3d9624239be2e6ac15971300b
+ name: 'Power supply [{#DURABLE.ID}]: Temperature'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",temperature]'
+ delay: '0'
+ history: 7d
+ units: '!°C'
+ description: 'Power supply temperature.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''dctemp''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ graph_prototypes:
+ -
+ uuid: 538040f8853648058e10830ddc2cba70
+ name: 'Power supply [{#DURABLE.ID}]: Temperature'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",temperature]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: faae0d9be7ea4531a584a52002317cc9
+ name: 'Volumes discovery'
+ type: DEPENDENT
+ key: hpe.msa.volumes.discovery
+ delay: '0'
+ description: 'Discover volumes.'
+ item_prototypes:
+ -
+ uuid: f9818ae47544417bb270af4f8f014c0a
+ name: 'Volume [{#NAME}]: Cache: Read hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.read.hits["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''read-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 877afc03787443129373d955067f8c6c
+ name: 'Volume [{#NAME}]: Cache: Read misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.read.misses["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''read-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: e3a0b52f33e847c980ffe3f4dcda5ab4
+ name: 'Volume [{#NAME}]: Cache: Write hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.write.hits["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''write-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: b2b0c3fd7ab74eb3a6013c3f3d65e356
+ name: 'Volume [{#NAME}]: Cache: Write misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.write.misses["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''write-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 6b12caedf23b4b768dbff01096d72c93
+ name: 'Volume [{#NAME}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 705428d111dd49d19eb79b6a0de592c1
+ name: 'Volume [{#NAME}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 5f44581f011b46cf96ebd040de635976
+ name: 'Volume [{#NAME}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 0e2831ed17ec4fe0a56b800086b47901
+ name: 'Volume [{#NAME}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.read["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 9d14e4239f5941a7bfb07b6645b9e698
+ name: 'Volume [{#NAME}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: e1a6b6cc609c4cf789978f01b18af31f
+ name: 'Volume [{#NAME}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.write["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: b47d7b03e19f4e25803b1d639a0ecf43
+ name: 'Volume [{#NAME}]: Space allocated'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.space["{#NAME}",allocated]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The amount of space currently allocated to the volume.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes''][?(@[''volume-name''] == "{#NAME}")].[''allocated-size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: b6aaba39f7c74dcf95947626852855c8
+ name: 'Volume [{#NAME}]: Space total'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.space["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The capacity of the volume.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes''][?(@[''volume-name''] == "{#NAME}")].[''size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '512'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ graph_prototypes:
+ -
+ uuid: 20d2047e3f024e5197362375601415eb
+ name: 'Volume [{#NAME}]: Cache usage'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.read.hits["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.read.misses["{#NAME}",rate]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.write.hits["{#NAME}",rate]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.write.misses["{#NAME}",rate]'
+ -
+ uuid: 0b11191f26464e79add18302e245a9cc
+ name: 'Volume [{#NAME}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]'
+ -
+ uuid: 133ef12b0cbc49a1a37c594f5c498643
+ name: 'Volume [{#NAME}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.iops.read["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.iops.write["{#NAME}",rate]'
+ -
+ uuid: f8c4f07925404bc0b1e3ada45358580a
+ name: 'Volume [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.space["{#NAME}",allocated]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2040 Storage by HTTP'
+ key: 'hpe.msa.volumes.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''volume-name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ tags:
+ -
+ tag: class
+ value: storage
+ -
+ tag: target
+ value: hpe
+ -
+ tag: target
+ value: msa-2040
+ macros:
+ -
+ macro: '{$HPE.MSA.API.PASSWORD}'
+ type: SECRET_TEXT
+ description: 'Specify password for API.'
+ -
+ macro: '{$HPE.MSA.API.PORT}'
+ value: '443'
+ description: 'Connection port for API.'
+ -
+ macro: '{$HPE.MSA.API.SCHEME}'
+ value: https
+ description: 'Connection scheme for API.'
+ -
+ macro: '{$HPE.MSA.API.USERNAME}'
+ value: zabbix
+ description: 'Specify user name for API.'
+ -
+ macro: '{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the CPU utilization in %.'
+ -
+ macro: '{$HPE.MSA.DATA.TIMEOUT}'
+ value: 30s
+ description: 'Response timeout for API.'
+ -
+ macro: '{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the disk group space utilization in %.'
+ -
+ macro: '{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN}'
+ value: '80'
+ description: 'The warning threshold of the disk group space utilization in %.'
+ -
+ macro: '{$HPE.MSA.POOL.PUSED.MAX.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the pool space utilization in %.'
+ -
+ macro: '{$HPE.MSA.POOL.PUSED.MAX.WARN}'
+ value: '80'
+ description: 'The warning threshold of the pool space utilization in %.'
+ valuemaps:
+ -
+ uuid: 3bb065172c93464c9f5e2e569f523a05
+ name: 'Controller status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Operational
+ -
+ value: '1'
+ newvalue: Down
+ -
+ value: '2'
+ newvalue: 'Not Installed'
+ -
+ uuid: 78f22a3d82a64372abb3e3eeb08cf03e
+ name: 'Disk group status'
+ mappings:
+ -
+ value: '0'
+ newvalue: FTOL
+ -
+ value: '1'
+ newvalue: FTDN
+ -
+ value: '2'
+ newvalue: CRIT
+ -
+ value: '3'
+ newvalue: OFFL
+ -
+ value: '4'
+ newvalue: QTCR
+ -
+ value: '5'
+ newvalue: QTOF
+ -
+ value: '6'
+ newvalue: QTDN
+ -
+ value: '7'
+ newvalue: STOP
+ -
+ value: '8'
+ newvalue: MSNG
+ -
+ value: '9'
+ newvalue: DMGD
+ -
+ value: '11'
+ newvalue: QTDN
+ -
+ value: '250'
+ newvalue: UP
+ -
+ uuid: eb92d7812b8e4d2dbe4908fc3d42ade8
+ name: 'Disk temperature status'
+ mappings:
+ -
+ value: '1'
+ newvalue: OK
+ -
+ value: '2'
+ newvalue: Critical
+ -
+ value: '3'
+ newvalue: Warning
+ -
+ value: '4'
+ newvalue: Unknown
+ -
+ uuid: e6478ee0a41b49778f2a3dc130649838
+ name: 'Disk type'
+ mappings:
+ -
+ value: '4'
+ newvalue: SAS
+ -
+ value: '8'
+ newvalue: 'SSD SAS'
+ -
+ value: '11'
+ newvalue: 'SAS MDL'
+ -
+ uuid: 243d29502c1c416c85eb2ccc961a159c
+ name: 'Enclosure status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Unsupported
+ -
+ value: '1'
+ newvalue: Up
+ -
+ value: '2'
+ newvalue: Error
+ -
+ value: '3'
+ newvalue: Warning
+ -
+ value: '4'
+ newvalue: Unrecoverable
+ -
+ value: '5'
+ newvalue: 'Not Present'
+ -
+ value: '6'
+ newvalue: Unknown
+ -
+ value: '7'
+ newvalue: Unavailable
+ -
+ value: '20'
+ newvalue: 'Spun Down'
+ -
+ uuid: 40916613dcf24dc2beb8634ec67c04bf
+ name: 'Fan status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Up
+ -
+ value: '1'
+ newvalue: Error
+ -
+ value: '2'
+ newvalue: 'Off'
+ -
+ value: '3'
+ newvalue: Missing
+ -
+ uuid: f656acc354ab4593a1c1718668c02001
+ name: 'FRU status'
+ mappings:
+ -
+ value: '0'
+ newvalue: 'Invalid data'
+ -
+ value: '1'
+ newvalue: Fault
+ -
+ value: '2'
+ newvalue: Absent
+ -
+ value: '4'
+ newvalue: OK
+ -
+ value: '5'
+ newvalue: 'Not available'
+ -
+ value: '6'
+ newvalue: Unknown
+ -
+ uuid: 448c57be77694badb75dbdabe9b233df
+ name: Health
+ mappings:
+ -
+ value: '0'
+ newvalue: OK
+ -
+ value: '1'
+ newvalue: Degraded
+ -
+ value: '2'
+ newvalue: Fault
+ -
+ value: '3'
+ newvalue: Unknown
+ -
+ value: '4'
+ newvalue: N/A
+ -
+ uuid: 66a23d01db744677a1878143ccf102c7
+ name: 'Port type'
+ mappings:
+ -
+ value: '0'
+ newvalue: Unknown
+ -
+ value: '6'
+ newvalue: FC
+ -
+ value: '8'
+ newvalue: SAS
+ -
+ value: '9'
+ newvalue: iSCSI
+ -
+ uuid: 996bbe1c4e2841d6ac35efd9b5236fef
+ name: 'RAID type'
+ mappings:
+ -
+ value: '0'
+ newvalue: RAID0
+ -
+ value: '1'
+ newvalue: RAID1
+ -
+ value: '3'
+ newvalue: RAID3
+ -
+ value: '5'
+ newvalue: RAID5
+ -
+ value: '6'
+ newvalue: NRAID
+ -
+ value: '8'
+ newvalue: RAID50
+ -
+ value: '10'
+ newvalue: RAID10
+ -
+ value: '11'
+ newvalue: RAID6
+ -
+ uuid: 6c5d6649be2347ca83258f0ab1a63137
+ name: Status
+ mappings:
+ -
+ value: '0'
+ newvalue: Up
+ -
+ value: '1'
+ newvalue: Warning
+ -
+ value: '2'
+ newvalue: Error
+ -
+ value: '3'
+ newvalue: 'Not present'
+ -
+ value: '4'
+ newvalue: Unknown
+ -
+ value: '6'
+ newvalue: Disconnected
diff --git a/templates/san/hpe_msa2060_http/README.md b/templates/san/hpe_msa2060_http/README.md
new file mode 100644
index 00000000000..4484b0e5b96
--- /dev/null
+++ b/templates/san/hpe_msa2060_http/README.md
@@ -0,0 +1,250 @@
+
+# HPE MSA 2060 Storage by HTTP
+
+## Overview
+
+For Zabbix version: 6.0 and higher
+The template to monitor HPE MSA 2060 by HTTP.
+It works without any external scripts and uses the script item.
+
+
+This template was tested on:
+
+- HPE MSA 2060 Storage
+
+## Setup
+
+> See [Zabbix template operation](https://www.zabbix.com/documentation/6.0/manual/config/templates_out_of_the_box/http) for basic instructions.
+
+1. Create a user "zabbix" with the monitor role on the storage.
+2. Link the template to a host.
+3. Set the {$HPE.MSA.API.PASSWORD} macro and configure an interface with the address through which the API is accessible.
+4. Change the {$HPE.MSA.API.SCHEME} and {$HPE.MSA.API.PORT} macros if needed (an optional way to set these macros through the Zabbix API is sketched below).
+
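+As an optional alternative to steps 3 and 4, the macros can also be set through the Zabbix API once the template is linked. The snippet below is a minimal sketch, assuming a hypothetical frontend URL, API token, and host ID; adjust it to your environment.
+
+```python
+# Minimal sketch: create {$HPE.MSA.API.PASSWORD} as a secret-text macro on the host.
+# ZABBIX_URL, API_TOKEN and HOST_ID are placeholders for your own values.
+import requests
+
+ZABBIX_URL = "https://zabbix.example.com/api_jsonrpc.php"  # hypothetical
+API_TOKEN = "***"                                          # hypothetical
+HOST_ID = "10642"                                          # hypothetical
+
+payload = {
+    "jsonrpc": "2.0",
+    "method": "usermacro.create",
+    "params": {
+        "hostid": HOST_ID,
+        "macro": "{$HPE.MSA.API.PASSWORD}",
+        "value": "storage-api-password",
+        "type": 1,  # 1 = secret text
+    },
+    "auth": API_TOKEN,
+    "id": 1,
+}
+
+print(requests.post(ZABBIX_URL, json=payload, timeout=10).json())
+```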
+
+## Zabbix configuration
+
+No specific Zabbix configuration is required.
+
+### Macros used
+
+|Name|Description|Default|
+|----|-----------|-------|
+|{$HPE.MSA.API.PASSWORD} |<p>Specify password for API.</p> |`` |
+|{$HPE.MSA.API.PORT} |<p>Connection port for API.</p> |`443` |
+|{$HPE.MSA.API.SCHEME} |<p>Connection scheme for API.</p> |`https` |
+|{$HPE.MSA.API.USERNAME} |<p>Specify user name for API.</p> |`zabbix` |
+|{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT} |<p>The critical threshold of the CPU utilization in %.</p> |`90` |
+|{$HPE.MSA.DATA.TIMEOUT} |<p>Response timeout for API.</p> |`30s` |
+|{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT} |<p>The critical threshold of the disk group space utilization in %.</p> |`90` |
+|{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN} |<p>The warning threshold of the disk group space utilization in %.</p> |`80` |
+|{$HPE.MSA.POOL.PUSED.MAX.CRIT} |<p>The critical threshold of the pool space utilization in %.</p> |`90` |
+|{$HPE.MSA.POOL.PUSED.MAX.WARN} |<p>The warning threshold of the pool space utilization in %.</p> |`80` |
+
+## Template links
+
+There are no template links in this template.
+
+## Discovery rules
+
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Controllers discovery |<p>Discover controllers.</p> |DEPENDENT |hpe.msa.controllers.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Disk groups discovery |<p>Discover disk groups.</p> |DEPENDENT |hpe.msa.disks.groups.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Disks discovery |<p>Discover disks.</p> |DEPENDENT |hpe.msa.disks.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Overrides:**</p><p>SSD life left<br> - {#TYPE} MATCHES_REGEX `8`<br> - ITEM_PROTOTYPE REGEXP `SSD life left` - DISCOVER</p> |
+|Enclosures discovery |<p>Discover enclosures.</p> |DEPENDENT |hpe.msa.enclosures.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Fans discovery |<p>Discover fans.</p> |DEPENDENT |hpe.msa.fans.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|FRU discovery |<p>Discover FRU.</p> |DEPENDENT |hpe.msa.frus.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p> <p>- {#TYPE} NOT_MATCHES_REGEX `^(POWER_SUPPLY|RAID_IOM|CHASSIS_MIDPLANE)$`</p> |
+|Pools discovery |<p>Discover pools.</p> |DEPENDENT |hpe.msa.pools.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Ports discovery |<p>Discover ports.</p> |DEPENDENT |hpe.msa.ports.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Power supplies discovery |<p>Discover power supplies.</p> |DEPENDENT |hpe.msa.power_supplies.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Volumes discovery |<p>Discover volumes.</p> |DEPENDENT |hpe.msa.volumes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+
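+The discovery rules above only reshape the JSON returned by the `hpe.msa.data.get` master item. As a rough illustration of how, for example, "Volumes discovery" turns that JSON into LLD rows (the preprocessing keeps the `volumes` array and the LLD macro path maps each entry's `volume-name` to {#NAME}), here is a small sketch with an invented payload:
+
+```python
+# Illustration only: how "Volumes discovery" derives {#NAME} values.
+# The payload below is invented; real data comes from the hpe.msa.data.get item.
+import json
+
+raw = json.dumps({
+    "volumes": [
+        {"volume-name": "Vol0001", "size-numeric": 1953125},
+        {"volume-name": "Vol0002", "size-numeric": 3906250},
+    ]
+})
+
+data = json.loads(raw)
+
+# Discovery rule preprocessing: JSONPATH $.['volumes']
+volumes = data["volumes"]
+
+# LLD macro path: {#NAME} <- $.['volume-name'] for every discovered entity
+lld_rows = [{"{#NAME}": v["volume-name"]} for v in volumes]
+print(lld_rows)  # [{'{#NAME}': 'Vol0001'}, {'{#NAME}': 'Vol0002'}]
+```
+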
+## Items collected
+
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|HPE |Get method errors |<p>A list of method errors from API requests.</p> |DEPENDENT |hpe.msa.data.errors<p>**Preprocessing**:</p><p>- JSONPATH: `$.['errors']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Product ID |<p>The product model identifier.</p> |DEPENDENT |hpe.msa.system.product_id<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['product-id']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System contact |<p>The name of the person who administers the system.</p> |DEPENDENT |hpe.msa.system.contact<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-contact']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System information |<p>A brief description of what the system is used for or how it is configured.</p> |DEPENDENT |hpe.msa.system.info<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-information']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System location |<p>The location of the system.</p> |DEPENDENT |hpe.msa.system.location<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-location']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System name |<p>The name of the storage system.</p> |DEPENDENT |hpe.msa.system.name<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['system-name']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Vendor name |<p>The vendor name.</p> |DEPENDENT |hpe.msa.system.vendor_name<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['vendor-name']`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |System health |<p>System health status.</p> |DEPENDENT |hpe.msa.system.health<p>**Preprocessing**:</p><p>- JSONPATH: `$.system[0].['health-numeric']`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p> |
+|HPE |HPE MSA: Service ping |<p>Check if HTTP/HTTPS service accepts TCP connections.</p> |SIMPLE |net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Firmware version |<p>Storage controller firmware version.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",firmware]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['sc-fw'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Part number |<p>Part number of the controller.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Serial number |<p>Storage controller serial number.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Health |<p>Controller health status.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Status |<p>Storage controller status.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Disks |<p>Number of disks in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",disks]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['disks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Pools |<p>Number of pools in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",pools]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-storage-pools'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Disk groups |<p>Number of disk groups in the storage system.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",disk_groups]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['virtual-disks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IP address |<p>Controller network port IP address.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",ip_address]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['ip-address'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache memory size |<p>Controller cache memory size.</p> |DEPENDENT |hpe.msa.controllers.cache["{#CONTROLLER.ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controllers'][?(@['durable-id'] == "{#DURABLE.ID}")].['cache-memory-size'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write utilization |<p>Percentage of write cache in use, from 0 to 100.</p> |DEPENDENT |hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-used'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Read hits, rate |<p>For the controller that owns the volume, the number of times the block to be read is found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['read-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Read misses, rate |<p>For the controller that owns the volume, the number of times the block to be read is not found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['read-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write hits, rate |<p>For the controller that owns the volume, the number of times the block written to is found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Cache: Write misses, rate |<p>For the controller that owns the volume, the number of times the block written to is not found in cache per second.</p> |DEPENDENT |hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['write-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: CPU utilization |<p>Percentage of time the CPU is busy, from 0 to 100.</p> |DEPENDENT |hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['cpu-load'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, total rate |<p>Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.controllers.iops.total["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['iops'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.total["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Controller [{#CONTROLLER.ID}]: Uptime |<p>Number of seconds since the controller was restarted.</p> |DEPENDENT |hpe.msa.controllers["{#CONTROLLER.ID}",uptime]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['controller-statistics'][?(@['durable-id'] == "{#DURABLE.ID}")].['power-on-time'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Disks count |<p>Number of disks in the disk group.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",disk_count]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['diskcount'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Pool space used |<p>The percentage of pool capacity that the disk group occupies.</p> |DEPENDENT |hpe.msa.disks.groups.space["{#NAME}",pool_util]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['pool-percentage'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Health |<p>Disk group health.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Blocks size |<p>The size of a block, in bytes.</p> |DEPENDENT |hpe.msa.disks.groups.blocks["{#NAME}",size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['blocksize'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Blocks free |<p>Free space in blocks.</p> |DEPENDENT |hpe.msa.disks.groups.blocks["{#NAME}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['freespace-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Blocks total |<p>Total space in blocks.</p> |DEPENDENT |hpe.msa.disks.groups.blocks["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['blocks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: Space free |<p>The free space in the disk group.</p> |CALCULATED |hpe.msa.disks.groups.space["{#NAME}",free]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.disks.groups.blocks["{#NAME}",size])*last(//hpe.msa.disks.groups.blocks["{#NAME}",free])` |
+|HPE |Disk group [{#NAME}]: Space total |<p>The capacity of the disk group.</p> |CALCULATED |hpe.msa.disks.groups.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.disks.groups.blocks["{#NAME}",size])*last(//hpe.msa.disks.groups.blocks["{#NAME}",total])` |
+|HPE |Disk group [{#NAME}]: Space utilization |<p>The space utilization percentage in the disk group.</p> |CALCULATED |hpe.msa.disks.groups.space["{#NAME}",util]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`100-last(//hpe.msa.disks.groups.space["{#NAME}",free])/last(//hpe.msa.disks.groups.space["{#NAME}",total])*100` |
+|HPE |Disk group [{#NAME}]: RAID type |<p>The RAID level of the disk group.</p> |DEPENDENT |hpe.msa.disks.groups.raid["{#NAME}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['raidtype-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk group [{#NAME}]: Status |<p>The status of the disk group:</p><p>- CRIT: Critical. The disk group is online but isn't fault tolerant because some of its disks are down.</p><p>- DMGD: Damaged. The disk group is online and fault tolerant, but some of its disks are damaged.</p><p>- FTDN: Fault tolerant with a down disk. The disk group is online and fault tolerant, but some of its disks are down.</p><p>- FTOL: Fault tolerant.</p><p>- MSNG: Missing. The disk group is online and fault tolerant, but some of its disks are missing.</p><p>- OFFL: Offline. Either the disk group is using offline initialization, or its disks are down and data may be lost.</p><p>- QTCR: Quarantined critical. The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p><p>- QTDN: Quarantined with a down disk. The RAID6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p><p>- QTOF: Quarantined offline. The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.</p><p>- QTUN: Quarantined unsupported. The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.</p><p>- STOP: The disk group is stopped.</p><p>- UNKN: Unknown.</p><p>- UP: Up. The disk group is online and does not have fault-tolerant attributes.</p> |DEPENDENT |hpe.msa.disks.groups["{#NAME}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-groups'][?(@['name'] == "{#NAME}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk group [{#NAME}]: IOPS, total rate |<p>Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.disks.groups.iops.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['iops'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Total |<p>Average response time for read and write operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Read |<p>Average response time for all read operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-read-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: Average response time: Write |<p>Average response time for all write operations, calculated over the interval since these statistics were last requested or reset.</p> |DEPENDENT |hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['avg-write-rsp-time'].first()`</p><p>- MULTIPLIER: `0.000001`</p> |
+|HPE |Disk group [{#NAME}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.disks.groups.iops.read["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.disks.groups.iops.write["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Disk group [{#NAME}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disk-group-statistics'][?(@['name'] == "{#NAME}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Pool [{#NAME}]: Health |<p>Pool health.</p> |DEPENDENT |hpe.msa.pools["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Pool [{#NAME}]: Blocks size |<p>The size of a block, in bytes.</p> |DEPENDENT |hpe.msa.pools.blocks["{#NAME}",size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['blocksize'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Pool [{#NAME}]: Blocks available |<p>Available space in blocks.</p> |DEPENDENT |hpe.msa.pools.blocks["{#NAME}",available]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['total-avail-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Pool [{#NAME}]: Blocks total |<p>Total space in blocks.</p> |DEPENDENT |hpe.msa.pools.blocks["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['pools'][?(@['name'] == "{#NAME}")].['total-size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Pool [{#NAME}]: Space free |<p>The free space in the pool.</p> |CALCULATED |hpe.msa.pools.space["{#NAME}",free]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.pools.blocks["{#NAME}",size])*last(//hpe.msa.pools.blocks["{#NAME}",available])` |
+|HPE |Pool [{#NAME}]: Space total |<p>The capacity of the pool.</p> |CALCULATED |hpe.msa.pools.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.pools.blocks["{#NAME}",size])*last(//hpe.msa.pools.blocks["{#NAME}",total])` |
+|HPE |Pool [{#NAME}]: Space utilization |<p>The space utilization percentage in the pool.</p> |CALCULATED |hpe.msa.pools.space["{#NAME}",util]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`100-last(//hpe.msa.pools.space["{#NAME}",free])/last(//hpe.msa.pools.space["{#NAME}",total])*100` |
+|HPE |Volume [{#NAME}]: Blocks size |<p>The size of a block, in bytes.</p> |DEPENDENT |hpe.msa.volumes.blocks["{#NAME}",size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes'][?(@['volume-name'] == "{#NAME}")].['blocksize'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Blocks allocated |<p>The amount of blocks currently allocated to the volume.</p> |DEPENDENT |hpe.msa.volumes.blocks["{#NAME}",allocated]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes'][?(@['volume-name'] == "{#NAME}")].['allocated-size-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Blocks total |<p>Total space in blocks.</p> |DEPENDENT |hpe.msa.volumes.blocks["{#NAME}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volumes'][?(@['volume-name'] == "{#NAME}")].['blocks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Space allocated |<p>The amount of space currently allocated to the volume.</p> |CALCULATED |hpe.msa.volumes.space["{#NAME}",allocated]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.volumes.blocks["{#NAME}",size])*last(//hpe.msa.volumes.blocks["{#NAME}",allocated])` |
+|HPE |Volume [{#NAME}]: Space total |<p>The capacity of the volume.</p> |CALCULATED |hpe.msa.volumes.space["{#NAME}",total]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>**Expression**:</p>`last(//hpe.msa.volumes.blocks["{#NAME}",size])*last(//hpe.msa.volumes.blocks["{#NAME}",total])` |
+|HPE |Volume [{#NAME}]: IOPS, total rate |<p>Total input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.volumes.iops.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['iops'].first()`</p> |
+|HPE |Volume [{#NAME}]: IOPS, read rate |<p>Number of read operations per second.</p> |DEPENDENT |hpe.msa.volumes.iops.read["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['number-of-reads'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: IOPS, write rate |<p>Number of write operations per second.</p> |DEPENDENT |hpe.msa.volumes.iops.write["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['number-of-writes'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Total |<p>The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.total["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['bytes-per-second-numeric'].first()`</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Reads |<p>The data read rate, in bytes per second.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['data-read-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Data transfer rate: Writes |<p>The data write rate, in bytes per second.</p> |DEPENDENT |hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['data-written-numeric'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Read hits, rate |<p>For the controller that owns the volume, the number of times the block to be read is found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.read.hits["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['read-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Read misses, rate |<p>For the controller that owns the volume, the number of times the block to be read is not found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.read.misses["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['read-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Write hits, rate |<p>For the controller that owns the volume, the number of times the block written to is found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.write.hits["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['write-cache-hits'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Volume [{#NAME}]: Cache: Write misses, rate |<p>For the controller that owns the volume, the number of times the block written to is not found in cache per second.</p> |DEPENDENT |hpe.msa.volumes.cache.write.misses["{#NAME}",rate]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['volume-statistics'][?(@['volume-name'] == "{#NAME}")].['write-cache-misses'].first()`</p><p>- CHANGE_PER_SECOND</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Health |<p>Enclosure health.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Status |<p>Enclosure status.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 6`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Midplane serial number |<p>Midplane serial number.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",midplane_serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['midplane-serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Part number |<p>Enclosure part number.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Model |<p>Enclosure model.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['model'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Enclosure [{#DURABLE.ID}]: Power |<p>Enclosure power in watts.</p> |DEPENDENT |hpe.msa.enclosures["{#DURABLE.ID}",power]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['enclosures'][?(@['durable-id'] == "{#DURABLE.ID}")].['enclosure-power'].first()`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Health |<p>Power supply health status.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Status |<p>Power supply status.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Part number |<p>Power supply part number.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Power supply [{#DURABLE.ID}]: Serial number |<p>Power supply serial number.</p> |DEPENDENT |hpe.msa.power_supplies["{#DURABLE.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['power-supplies'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Port [{#NAME}]: Health |<p>Port health status.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NAME}]: Status |<p>Port status.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['status-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NAME}]: Type |<p>Port type.</p> |DEPENDENT |hpe.msa.ports["{#NAME}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['ports'][?(@['port'] == "{#NAME}")].['port-type-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Health |<p>Fan health status.</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Status |<p>Fan status.</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Fan [{#DURABLE.ID}]: Speed |<p>Fan speed (revolutions per minute).</p> |DEPENDENT |hpe.msa.fans["{#DURABLE.ID}",speed]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['fans'][?(@['durable-id'] == "{#DURABLE.ID}")].['speed'].first()`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Health |<p>Disk health status.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",health]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['health-numeric'].first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Temperature status |<p>Disk temperature status.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",temperature_status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['temperature-status-numeric'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- IN_RANGE: `1 3`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 4`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Temperature |<p>Temperature of the disk.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",temperature]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['temperature-numeric'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Type |<p>Disk type:</p><p>SAS: Enterprise SAS spinning disk.</p><p>SAS MDL: Midline SAS spinning disk.</p><p>SSD SAS: SAS solid-state disk.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['description-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Disk group |<p>If the disk is in a disk group, the disk group name.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",group]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['disk-group'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Storage pool |<p>If the disk is in a pool, the pool name.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",pool]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['storage-pool-name'].first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Vendor |<p>Disk vendor.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",vendor]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['vendor'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Model |<p>Disk model.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['model'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Serial number |<p>Disk serial number.</p> |DEPENDENT |hpe.msa.disks["{#DURABLE.ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Blocks size |<p>The size of a block, in bytes.</p> |DEPENDENT |hpe.msa.disks.blocks["{#DURABLE.ID}",size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['blocksize'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Blocks total |<p>Total space in blocks.</p> |DEPENDENT |hpe.msa.disks.blocks["{#DURABLE.ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['blocks'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#DURABLE.ID}]: Space total |<p>Total size of the disk.</p> |CALCULATED |hpe.msa.disks.space["{#DURABLE.ID}",total]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p><p>**Expression**:</p>`last(//hpe.msa.disks.blocks["{#DURABLE.ID}",size])*last(//hpe.msa.disks.blocks["{#DURABLE.ID}",total])` |
+|HPE |Disk [{#DURABLE.ID}]: SSD life left |<p>The percentage of disk life remaining.</p> |DEPENDENT |hpe.msa.disks.ssd["{#DURABLE.ID}",life_left]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['disks'][?(@['durable-id'] == "{#DURABLE.ID}")].['ssd-life-left-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Status |<p>{#DESCRIPTION}. FRU status:</p><p>Absent: The FRU is not present.</p><p>Fault: The FRU's health is Degraded or Fault.</p><p>Invalid data: The FRU ID data is invalid. The FRU's EEPROM is improperly programmed.</p><p>OK: The FRU is operating normally.</p><p>Power off: The FRU is powered off.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['fru-status-numeric'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Part number |<p>{#DESCRIPTION}. Part number of the FRU.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",part_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['part-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Serial number |<p>{#DESCRIPTION}. FRU serial number.</p> |DEPENDENT |hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.['frus'][?(@['name'] == "{#TYPE}" && @['fru-location'] == "{#LOCATION}")].['serial-number'].first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|Zabbix raw items |HPE MSA: Get data |<p>The JSON with result of API requests (a brief sketch of the request flow follows this table).</p> |SCRIPT |hpe.msa.data.get<p>**Expression**:</p>`The text is too long. Please see the template.` |
+
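+The sketch below is an intentionally abridged outline of what the `HPE MSA: Get data` script item does (no error handling or per-method key remapping); the authoritative implementation is in the template YAML. Here `base_url`, `username` and `password` stand in for the script item parameters of the same names.
+
+```javascript
+// Simplified sketch of the script item body; see the template YAML for the full script.
+var request = new HttpRequest();
+request.addHeader('datatype: json');
+
+// The login token is the SHA-256 hash of "<username>_<password>".
+var auth = JSON.parse(request.get(base_url + 'api/login/' + sha256(username + '_' + password)));
+request.addHeader('sessionKey: ' + auth['status'][0]['response']);
+
+// Each 'show' method is requested once and merged into a single JSON object
+// that the dependent items then parse with JSONPath.
+var data = {};
+['system', 'controllers', 'disks'].forEach(function (method) {
+    data[method] = JSON.parse(request.get(base_url + 'api/show/' + method))[method];
+});
+return JSON.stringify(data);
+```
+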
+## Triggers
+
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|There are errors in method requests to API |<p>There are errors in method requests to API.</p> |`length(last(/HPE MSA 2060 Storage by HTTP/hpe.msa.data.errors))>0` |AVERAGE |<p>**Depends on**:</p><p>- Service is down or unavailable</p> |
+|System health is in degraded state |<p>System health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=1` |WARNING | |
+|System health is in fault state |<p>System health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=2` |AVERAGE | |
+|System health is in unknown state |<p>System health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=3` |INFO | |
+|Service is down or unavailable |<p>HTTP/HTTPS service is down or unable to establish TCP connection.</p> |`max(/HPE MSA 2060 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0` |HIGH | |
+|Controller [{#CONTROLLER.ID}]: Controller health is in degraded state |<p>Controller health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=1` |WARNING |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller health is in fault state |<p>Controller health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=2` |AVERAGE |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller health is in unknown state |<p>Controller health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=3` |INFO |<p>**Depends on**:</p><p>- Controller [{#CONTROLLER.ID}]: Controller is down</p> |
+|Controller [{#CONTROLLER.ID}]: Controller is down |<p>The controller is down.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1` |HIGH | |
+|Controller [{#CONTROLLER.ID}]: High CPU utilization |<p>Controller CPU utilization is too high. The system might be slow to respond.</p> |`min(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util],5m)>{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}` |WARNING | |
+|Controller [{#CONTROLLER.ID}]: Controller has been restarted |<p>The controller uptime is less than 10 minutes.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",uptime])<10m` |WARNING | |
+|Disk group [{#NAME}]: Disk group health is in degraded state |<p>Disk group health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=1` |WARNING | |
+|Disk group [{#NAME}]: Disk group health is in fault state |<p>Disk group health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=2` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group health is in unknown state |<p>Disk group health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=3` |INFO | |
+|Disk group [{#NAME}]: Disk group space is low |<p>Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}% available).</p> |`min(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}` |WARNING |<p>**Depends on**:</p><p>- Disk group [{#NAME}]: Disk group space is critically low</p> |
+|Disk group [{#NAME}]: Disk group space is critically low |<p>Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}% available).</p> |`min(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is fault tolerant with a down disk |<p>The disk group is online and fault tolerant, but some of its disks are down.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=1` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group has damaged disks |<p>The disk group is online and fault tolerant, but some of its disks are damaged.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=9` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group has missing disks |<p>The disk group is online and fault tolerant, but some of its disks are missing.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=8` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is offline |<p>Either the disk group is using offline initialization, or its disks are down and data may be lost.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=3` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined critical |<p>The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=4` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined offline |<p>The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined unsupported |<p>The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is quarantined with an inaccessible disk |<p>The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=6` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group is stopped |<p>The disk group is stopped.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=7` |AVERAGE | |
+|Disk group [{#NAME}]: Disk group status is critical |<p>The disk group is online but isn't fault tolerant because some of its disks are down.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=2` |AVERAGE | |
+|Pool [{#NAME}]: Pool health is in degraded state |<p>Pool health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=1` |WARNING | |
+|Pool [{#NAME}]: Pool health is in fault state |<p>Pool health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=2` |AVERAGE | |
+|Pool [{#NAME}]: Pool health is in unknown state |<p>Pool [{#NAME}] health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=3` |INFO | |
+|Pool [{#NAME}]: Pool space is low |<p>Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}% available).</p> |`min(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}` |WARNING |<p>**Depends on**:</p><p>- Pool [{#NAME}]: Pool space is critically low</p> |
+|Pool [{#NAME}]: Pool space is critically low |<p>Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}% available).</p> |`min(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}` |AVERAGE | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in degraded state |<p>Enclosure health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=1` |WARNING | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in fault state |<p>Enclosure health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Enclosure [{#DURABLE.ID}]: Enclosure health is in unknown state |<p>Enclosure health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=3` |INFO | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has critical status |<p>Enclosure has critical status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=2` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has warning status |<p>Enclosure has warning status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=3` |WARNING | |
+|Enclosure [{#DURABLE.ID}]: Enclosure is unavailable |<p>Enclosure is unavailable.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=7` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure is unrecoverable |<p>Enclosure is unrecoverable.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=4` |HIGH | |
+|Enclosure [{#DURABLE.ID}]: Enclosure has unknown status |<p>Enclosure has unknown status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=6` |INFO | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in degraded state |<p>Power supply health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=1` |WARNING | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in fault state |<p>Power supply health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Power supply [{#DURABLE.ID}]: Power supply health is in unknown state |<p>Power supply health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=3` |INFO | |
+|Power supply [{#DURABLE.ID}]: Power supply has error status |<p>Power supply has error status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=2` |AVERAGE | |
+|Power supply [{#DURABLE.ID}]: Power supply has warning status |<p>Power supply has warning status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=1` |WARNING | |
+|Power supply [{#DURABLE.ID}]: Power supply has unknown status |<p>Power supply has unknown status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=4` |INFO | |
+|Port [{#NAME}]: Port health is in degraded state |<p>Port health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=1` |WARNING | |
+|Port [{#NAME}]: Port health is in fault state |<p>Port health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=2` |AVERAGE | |
+|Port [{#NAME}]: Port health is in unknown state |<p>Port health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=3` |INFO | |
+|Port [{#NAME}]: Port has error status |<p>Port has error status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=2` |AVERAGE | |
+|Port [{#NAME}]: Port has warning status |<p>Port has warning status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=1` |WARNING | |
+|Port [{#NAME}]: Port has unknown status |<p>Port has unknown status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=4` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan health is in degraded state |<p>Fan health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=1` |WARNING | |
+|Fan [{#DURABLE.ID}]: Fan health is in fault state |<p>Fan health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Fan [{#DURABLE.ID}]: Fan health is in unknown state |<p>Fan health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=3` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan has error status |<p>Fan has error status.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=1` |AVERAGE | |
+|Fan [{#DURABLE.ID}]: Fan is missing |<p>Fan is missing.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=3` |INFO | |
+|Fan [{#DURABLE.ID}]: Fan is off |<p>Fan is off.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=2` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk health is in degraded state |<p>Disk health is in degraded state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=1` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk health is in fault state |<p>Disk health is in fault state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=2` |AVERAGE | |
+|Disk [{#DURABLE.ID}]: Disk health is in unknown state |<p>Disk health is in unknown state.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=3` |INFO | |
+|Disk [{#DURABLE.ID}]: Disk temperature is high |<p>Disk temperature is high.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=3` |WARNING | |
+|Disk [{#DURABLE.ID}]: Disk temperature is critically high |<p>Disk temperature is critically high.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=2` |AVERAGE | |
+|Disk [{#DURABLE.ID}]: Disk temperature is unknown |<p>Disk temperature is unknown.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=4` |INFO | |
+|FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU status is Degraded or Fault |<p>FRU status is Degraded or Fault.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=1` |AVERAGE | |
+|FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU ID data is invalid |<p>The FRU ID data is invalid. The FRU's EEPROM is improperly programmed.</p> |`last(/HPE MSA 2060 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=0` |WARNING | |
+
+## Feedback
+
+Please report any issues with the template at https://support.zabbix.com
+
+You can also provide feedback, discuss the template or ask for help with it at [ZABBIX forums](https://www.zabbix.com/forum/zabbix-suggestions-and-feedback).
+
diff --git a/templates/san/hpe_msa2060_http/template_san_hpe_msa2060_http.yaml b/templates/san/hpe_msa2060_http/template_san_hpe_msa2060_http.yaml
new file mode 100644
index 00000000000..69702938fc4
--- /dev/null
+++ b/templates/san/hpe_msa2060_http/template_san_hpe_msa2060_http.yaml
@@ -0,0 +1,4559 @@
+zabbix_export:
+ version: '6.0'
+ date: '2022-06-16T07:39:55Z'
+ groups:
+ -
+ uuid: 7c2cb727f85b492d88cd56e17127c64d
+ name: Templates/SAN
+ templates:
+ -
+ uuid: 10537641cfa3416ab0f1451cdb61d804
+ template: 'HPE MSA 2060 Storage by HTTP'
+ name: 'HPE MSA 2060 Storage by HTTP'
+ description: |
+ The template to monitor HPE MSA 2060 by HTTP.
+ It works without any external scripts and uses the script item.
+
+ Setup:
+ 1. Create user "zabbix" with monitor role on the storage.
+ 2. Link the template to a host.
+ 3. Configure {$HPE.MSA.API.PASSWORD} and an interface with address through which API is accessible.
+ 4. Change {$HPE.MSA.API.SCHEME} and {$HPE.MSA.API.PORT} macros if needed.
+
+ You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback
+
+ Template tooling version used: 0.41
+ groups:
+ -
+ name: Templates/SAN
+ items:
+ -
+ uuid: 078dd015f25d4778af429f9b5e391bc5
+ name: 'Get method errors'
+ type: DEPENDENT
+ key: hpe.msa.data.errors
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: TEXT
+ description: 'A list of method errors from API requests.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''errors'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: errors
+ triggers:
+ -
+ uuid: 2133ddf10a3641d78e609948d6842687
+ expression: 'length(last(/HPE MSA 2060 Storage by HTTP/hpe.msa.data.errors))>0'
+ name: 'There are errors in method requests to API'
+ priority: AVERAGE
+ description: 'There are errors in method requests to API.'
+ dependencies:
+ -
+ name: 'Service is down or unavailable'
+ expression: 'max(/HPE MSA 2060 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: bafec666b170480f941fe25cb3cf903d
+ name: 'HPE MSA: Get data'
+ type: SCRIPT
+ key: hpe.msa.data.get
+ history: '0'
+ trends: '0'
+ value_type: TEXT
+ params: |
+ var params = JSON.parse(value),
+ fields = ['username', 'password', 'base_url'],
+ methods = [
+ 'system',
+ 'controllers',
+ 'controller-statistics',
+ 'frus',
+ 'disk-groups',
+ 'disk-group-statistics',
+ 'disks',
+ 'enclosures',
+ 'fans',
+ 'pools',
+ 'ports',
+ 'power-supplies',
+ 'volumes',
+ 'volume-statistics'
+ ],
+ result = {},
+ data = {};
+
+ fields.forEach(function (field) {
+ if (typeof params !== 'object' || typeof params[field] === 'undefined' || params[field] === '' ) {
+ throw 'Required param is not set: "' + field + '".';
+ }
+ });
+
+ if (!params.base_url.endsWith('/')) {
+ params.base_url += '/';
+ }
+
+            var response, auth_string, auth_data, session_key, request = new HttpRequest();
+ request.addHeader('datatype: json');
+
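+            // Login token: the SHA-256 hash of "<username>_<password>", passed in the URL of the login request.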
+ auth_string = sha256(params.username + '_' + params.password);
+
+ response = request.get(params.base_url + 'api/login/' + auth_string);
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ throw 'Authentication request has failed with status code ' + request.getStatus() + ': ' + response;
+ }
+
+ if (response !== null) {
+ try {
+ auth_data = JSON.parse(response);
+ }
+ catch (error) {
+ throw 'Failed to parse authentication response received from device API.';
+ }
+ }
+
+ session_key = auth_data['status'][0]['response'];
+
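+            // The session key returned by the login call authenticates all subsequent requests.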
+ request.addHeader('sessionKey: ' + session_key);
+
+ data.errors = [];
+
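+            // Request every 'show' method once; some responses use a different root key
+            // (e.g. 'frus' -> 'enclosure-fru', 'disks' -> 'drives') and are remapped below.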
+ methods.forEach(function (method) {
+ response = request.get(params.base_url + 'api/show/' + method);
+ method_error = '';
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ method_error = 'Method: ' + method + '. Request has failed with status code ' + request.getStatus() + ': ' + response;
+ data.errors.push(method_error);
+ return;
+ }
+
+ if (response !== null) {
+ try {
+ result = JSON.parse(response);
+ switch (method) {
+ case 'controller-statistics':
+ var stats_array = result['controller-statistics'] || [];
+ for (var i = 0; i < stats_array.length; i++) {
+ result['controller-statistics'][i]['durable-id'] = result['controller-statistics'][i]['durable-id'].toLowerCase();
+ }
+ data[method] = result[method];
+ break;
+ case 'frus':
+ data[method] = result['enclosure-fru'];
+ break;
+ case 'disks':
+ data[method] = result['drives'];
+ break;
+ case 'fans':
+ data[method] = result['fan'];
+ break;
+ case 'ports':
+ data[method] = result['port'];
+ break;
+ default:
+ data[method] = result[method];
+ }
+ }
+ catch (error) {
+ method_error = 'Method: ' + method + '. Failed to parse response received from device API.';
+ }
+ }
+ else {
+ method_error = 'Method: ' + method + '. No data received by request.';
+ }
+
+ if (method_error.length > 0) {
+ data.errors.push(method_error);
+ }
+ });
+
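+            // With no failed requests, store an empty string so the dependent 'Get method errors'
+            // item and its length()-based trigger stay clear.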
+ if (data.errors.length == 0) {
+ data.errors = '';
+ }
+
+ return JSON.stringify(data);
+ description: 'The JSON with result of API requests.'
+ timeout: '{$HPE.MSA.DATA.TIMEOUT}'
+ parameters:
+ -
+ name: base_url
+ value: '{$HPE.MSA.API.SCHEME}://{HOST.CONN}:{$HPE.MSA.API.PORT}/'
+ -
+ name: username
+ value: '{$HPE.MSA.API.USERNAME}'
+ -
+ name: password
+ value: '{$HPE.MSA.API.PASSWORD}'
+ tags:
+ -
+ tag: component
+ value: raw
+ -
+ uuid: 4c8b2c72135a4af781c0f31730366abe
+ name: 'System contact'
+ type: DEPENDENT
+ key: hpe.msa.system.contact
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The name of the person who administers the system.'
+ inventory_link: CONTACT
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-contact'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: dc310d8c55a74a00bed9c004ba33d1fa
+ name: 'System health'
+ type: DEPENDENT
+ key: hpe.msa.system.health
+ delay: '0'
+ history: 7d
+ description: 'System health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''health-numeric'']'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: system
+ triggers:
+ -
+ uuid: 49e8c1d8a14f40b5acb2723e370ccccb
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=1'
+ name: 'System health is in degraded state'
+ priority: WARNING
+ description: 'System health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 2709a971f2ce417d8e269a0e5ebdd964
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=2'
+ name: 'System health is in fault state'
+ priority: AVERAGE
+ description: 'System health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: fa35428a4f41453984bd0bfa566e0674
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.system.health)=3'
+ name: 'System health is in unknown state'
+ priority: INFO
+ description: 'System health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: c4aae4a5f218472698751d9de8d1087d
+ name: 'System information'
+ type: DEPENDENT
+ key: hpe.msa.system.info
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'A brief description of what the system is used for or how it is configured.'
+ inventory_link: NOTES
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-information'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 3768f170e5ef44bca39e89b1f8973e6d
+ name: 'System location'
+ type: DEPENDENT
+ key: hpe.msa.system.location
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The location of the system.'
+ inventory_link: LOCATION
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-location'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 00c58217d52e4cd5852bdd9c71c4375f
+ name: 'System name'
+ type: DEPENDENT
+ key: hpe.msa.system.name
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The name of the storage system.'
+ inventory_link: NAME
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''system-name'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 103e58d547284e68b079e92074950ff9
+ name: 'Product ID'
+ type: DEPENDENT
+ key: hpe.msa.system.product_id
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The product model identifier.'
+ inventory_link: MODEL
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''product-id'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 7865d8ae697c40c5b5855c47bb82ccc4
+ name: 'Vendor name'
+ type: DEPENDENT
+ key: hpe.msa.system.vendor_name
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The vendor name.'
+ inventory_link: VENDOR
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.system[0].[''vendor-name'']'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 3831060089ff497993088472e922df38
+ name: 'HPE MSA: Service ping'
+ type: SIMPLE
+ key: 'net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"]'
+ history: 7d
+ description: 'Check if HTTP/HTTPS service accepts TCP connections.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: network
+ triggers:
+ -
+ uuid: 9c1bf26f95d946f386bbf613d3d55779
+ expression: 'max(/HPE MSA 2060 Storage by HTTP/net.tcp.service["{$HPE.MSA.API.SCHEME}","{HOST.CONN}","{$HPE.MSA.API.PORT}"],5m)=0'
+ name: 'Service is down or unavailable'
+ priority: HIGH
+ description: 'HTTP/HTTPS service is down or unable to establish TCP connection.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ discovery_rules:
+ -
+ uuid: 91c30dd0509843898601ce6d489fab03
+ name: 'Controllers discovery'
+ type: DEPENDENT
+ key: hpe.msa.controllers.discovery
+ delay: '0'
+ description: 'Discover controllers.'
+ item_prototypes:
+ -
+ uuid: 73bc16fc631f4386abbc78897db07e13
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Read hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''read-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 04e14fe4d8ba4693b954ebcac1671649
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Read misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''read-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 5cb9f7eb42d2413a90161ac192629073
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 61aa7235c6c44cfababd1b2390cc0443
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block written to is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 0d754544c18143ff98114e1ed316ad1e
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write utilization'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'Percentage of write cache in use, from 0 to 100.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''write-cache-used''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 482c5af99fe740278c4663ba300dee04
+ name: 'Controller [{#CONTROLLER.ID}]: Cache memory size'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cache["{#CONTROLLER.ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Controller cache memory size.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''cache-memory-size''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 80d6ae014e354f6c844c3b88ea66c530
+ name: 'Controller [{#CONTROLLER.ID}]: CPU utilization'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'Percentage of time the CPU is busy, from 0 to 100.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''cpu-load''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 0bf68b46b7644ad5ad0123df49c1da35
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util],5m)>{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}'
+ name: 'Controller [{#CONTROLLER.ID}]: High CPU utilization'
+ event_name: 'Controller [{#CONTROLLER.ID}]: High CPU utilization (over {$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}% for 5m)'
+ priority: WARNING
+ description: 'Controller CPU utilization is too high. The system might be slow to respond.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: c8fbfd459fce4149b1459e366b61981a
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 9c5c23273f5b43ad9e300d2c7b90bc3f
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.total["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 94f0b7f7d397453f9227c1b473a77a4e
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 8b0f014d1ed5470d919357f204b704ca
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 16f2fd5bd9d244daa09aef3f79a5d450
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.total["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 9b8366ac60304c3c98dedc278ad18418
+ name: 'Controller [{#CONTROLLER.ID}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 5f6c124f1aef41499ee52616ede02de9
+ name: 'Controller [{#CONTROLLER.ID}]: Disks'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",disks]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disks in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''disks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: c70f280c9c494b769b442f3a22a3c173
+ name: 'Controller [{#CONTROLLER.ID}]: Disk groups'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",disk_groups]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disk groups in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''virtual-disks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: ba1bb9818a9a487c8742d619316b087e
+ name: 'Controller [{#CONTROLLER.ID}]: Firmware version'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",firmware]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Storage controller firmware version.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''sc-fw''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 5f5307f2904a4792af1906a2b03a2a9b
+ name: 'Controller [{#CONTROLLER.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Controller health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: component
+ value: health
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 3988a5b897a34c84952fa573d7019879
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=1'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in degraded state'
+ priority: WARNING
+ description: 'Controller health is in degraded state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 7256e023ac82427bb6ee923d4ff07786
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=2'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in fault state'
+ priority: AVERAGE
+ description: 'Controller health is in fault state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 15bc89e6c61549caaf5a66c85446ea9d
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",health])=3'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller health is in unknown state'
+ priority: INFO
+ description: 'Controller health is in unknown state.'
+ dependencies:
+ -
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 2c9c2636aeb543ec8e70102c555fe776
+ name: 'Controller [{#CONTROLLER.ID}]: IP address'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",ip_address]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Controller network port IP address.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''ip-address''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 3405ef21e2cb40729e16c5b8aaf35996
+ name: 'Controller [{#CONTROLLER.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Part number of the controller.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 9b4ee1a634c3462f8fb48eb0e79984df
+ name: 'Controller [{#CONTROLLER.ID}]: Pools'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",pools]'
+ delay: '0'
+ history: 7d
+ description: 'Number of pools in the storage system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''number-of-storage-pools''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: 6980d1841bc04c79868d6f05bf59921e
+ name: 'Controller [{#CONTROLLER.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Storage controller serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ -
+ uuid: c0c2034fc848400c9b1f09f0c54790b3
+ name: 'Controller [{#CONTROLLER.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Storage controller status.'
+ valuemap:
+ name: 'Controller status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: component
+ value: health
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 99de4f8de416485db5c3844d1c8d654b
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",status])=1'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller is down'
+ priority: HIGH
+ description: 'The controller is down.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 7a9b3ba8dd5446d0961a6eea595c2b49
+ name: 'Controller [{#CONTROLLER.ID}]: Uptime'
+ type: DEPENDENT
+ key: 'hpe.msa.controllers["{#CONTROLLER.ID}",uptime]'
+ delay: '0'
+ history: 7d
+ units: uptime
+ description: 'Number of seconds since the controller was restarted.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controller-statistics''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''power-on-time''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: controller
+ -
+ tag: controller
+ value: '{#CONTROLLER.ID}'
+ trigger_prototypes:
+ -
+ uuid: 255250aa4b75465a989bf8f3fd805667
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.controllers["{#CONTROLLER.ID}",uptime])<10m'
+ name: 'Controller [{#CONTROLLER.ID}]: Controller has been restarted'
+ event_name: 'Controller [{#CONTROLLER.ID}]: Controller has been restarted (uptime < 10m)'
+ priority: WARNING
+ description: 'The controller uptime is less than 10 minutes.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ graph_prototypes:
+ -
+ uuid: a0bac1256ecf42fb9e980a49e52f008e
+ name: 'Controller [{#CONTROLLER.ID}]: Cache: Write utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write["{#CONTROLLER.ID}",util]'
+ -
+ uuid: 2b3343a641304872a82c84e1b918f8b3
+ name: 'Controller [{#CONTROLLER.ID}]: Cache usage'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.read.hits["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.read.misses["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write.hits["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cache.write.misses["{#CONTROLLER.ID}",rate]'
+ -
+ uuid: ed2117af47d94be9bed0632a0b662a25
+ name: 'Controller [{#CONTROLLER.ID}]: Controller CPU utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.cpu["{#CONTROLLER.ID}",util]'
+ -
+ uuid: 27b53c540cae45da9b2e13cbbb1ab821
+ name: 'Controller [{#CONTROLLER.ID}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.data_transfer.reads["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.data_transfer.writes["{#CONTROLLER.ID}",rate]'
+ -
+ uuid: ce3c794ac9424be5a104b812680cc77b
+ name: 'Controller [{#CONTROLLER.ID}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.iops.read["{#CONTROLLER.ID}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.controllers.iops.write["{#CONTROLLER.ID}",rate]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#CONTROLLER.ID}'
+ path: '$.[''controller-id'']'
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''controllers'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 46478b42c76348d7824c715fd6d20f74
+ name: 'Disks discovery'
+ type: DEPENDENT
+ key: hpe.msa.disks.discovery
+ delay: '0'
+ description: 'Discover disks.'
+ item_prototypes:
+ -
+ uuid: 4fedb88c1bb74c2cb5a0f72fdfcff104
+ name: 'Disk [{#DURABLE.ID}]: Blocks size'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.blocks["{#DURABLE.ID}",size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The size of a block, in bytes.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''blocksize''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: a491cb03df9c4e3ead70e0a74d9337b2
+ name: 'Disk [{#DURABLE.ID}]: Blocks total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.blocks["{#DURABLE.ID}",total]'
+ delay: '0'
+ history: 7d
+ description: 'Total space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''blocks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 6c20cf4e84b0427fbe797fc209d78785
+ name: 'Disk [{#DURABLE.ID}]: Space total'
+ type: CALCULATED
+ key: 'hpe.msa.disks.space["{#DURABLE.ID}",total]'
+ delay: 1h
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.disks.blocks["{#DURABLE.ID}",size])*last(//hpe.msa.disks.blocks["{#DURABLE.ID}",total])'
+ description: 'Total size of the disk.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 80ea0929a1bf43f4bdeba80e675c52bd
+ name: 'Disk [{#DURABLE.ID}]: SSD life left'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.ssd["{#DURABLE.ID}",life_left]'
+ delay: '0'
+ history: 7d
+ discover: NO_DISCOVER
+ units: '%'
+            description: 'The percentage of disk life remaining.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''ssd-life-left-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: f5bb9b7f437f434d83ca0542e41b2673
+ name: 'Disk [{#DURABLE.ID}]: Disk group'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",group]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'If the disk is in a disk group, the disk group name.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''disk-group''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 86fca5ad02af49c8a1d48f4a260a0dbf
+ name: 'Disk [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Disk health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: health
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: f76f8eec05a94e2db9d4cd3bcbb43aa4
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=1'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in degraded state'
+ priority: WARNING
+ description: 'Disk health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 383181e44a114334ab28ff09f49b2d51
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=2'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in fault state'
+ priority: AVERAGE
+ description: 'Disk health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 2b2d78c6c29f4bd58eff632809dee978
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",health])=3'
+ name: 'Disk [{#DURABLE.ID}]: Disk health is in unknown state'
+ priority: INFO
+ description: 'Disk health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 8f8ad679881c4693acfed363e5498b34
+ name: 'Disk [{#DURABLE.ID}]: Model'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk model.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''model''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 7fffecbf1ede4a5e9da5efc4311fc62e
+ name: 'Disk [{#DURABLE.ID}]: Storage pool'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",pool]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'If the disk is in a pool, the pool name.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''storage-pool-name''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 9a43a148ad4742e1a1df0038b36a171f
+ name: 'Disk [{#DURABLE.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 119dc5c43fb741028ccd599d25ad032c
+ name: 'Disk [{#DURABLE.ID}]: Temperature'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",temperature]'
+ delay: '0'
+ history: 7d
+ units: '!°C'
+ description: 'Temperature of the disk.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''temperature-numeric''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 0a0cf4600214443aa504d5c55d1f4015
+ name: 'Disk [{#DURABLE.ID}]: Temperature status'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",temperature_status]'
+ delay: '0'
+ history: 7d
+ description: 'Disk temperature status.'
+ valuemap:
+ name: 'Disk temperature status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''temperature-status-numeric''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: IN_RANGE
+ parameters:
+ - '1'
+ - '3'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: health
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: d4b8f77421d744918e087f696b3f0fff
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=2'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is critically high'
+ priority: AVERAGE
+ description: 'Disk temperature is critically high.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: fbbac4048fda477a99f00566624b6bdb
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=3'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is high'
+ priority: WARNING
+ description: 'Disk temperature is high.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 41e4f00446304206804da350a88ce3b9
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks["{#DURABLE.ID}",temperature_status])=4'
+ name: 'Disk [{#DURABLE.ID}]: Disk temperature is unknown'
+ priority: INFO
+ description: 'Disk temperature is unknown.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 1a23ef68bb484fd5baeba2b352b970db
+ name: 'Disk [{#DURABLE.ID}]: Type'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",type]'
+ delay: '0'
+ history: 7d
+ description: |
+ Disk type:
+ SAS: Enterprise SAS spinning disk.
+ SAS MDL: Midline SAS spinning disk.
+              SSD SAS: SAS solid-state disk.
+ valuemap:
+ name: 'Disk type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''description-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ -
+ uuid: d8e35779834640c8afdc5874f72fe8af
+ name: 'Disk [{#DURABLE.ID}]: Vendor'
+ type: DEPENDENT
+ key: 'hpe.msa.disks["{#DURABLE.ID}",vendor]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk vendor.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''vendor''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: disk
+ value: '{#DURABLE.ID}'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ -
+ lld_macro: '{#TYPE}'
+ path: '$.[''description-numeric'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disks'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ overrides:
+ -
+ name: 'SSD life left'
+ step: '1'
+ filter:
+ conditions:
+ -
+ macro: '{#TYPE}'
+ value: '8'
+ formulaid: A
+ operations:
+ -
+ operationobject: ITEM_PROTOTYPE
+ operator: REGEXP
+ value: 'SSD life left'
+ status: ENABLED
+ discover: DISCOVER
+ -
+ uuid: 88aaea8c16a247559c68783ad0cd5c4d
+ name: 'Disk groups discovery'
+ type: DEPENDENT
+ key: hpe.msa.disks.groups.discovery
+ delay: '0'
+ description: 'Discover disk groups.'
+ item_prototypes:
+ -
+ uuid: 8f68ad1b814d4287a6fd72d5bd03f7da
+ name: 'Disk group [{#NAME}]: Average response time: Read'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for all read operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-read-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 2ae8acbcd0b9442c9adc8086fa36fa40
+ name: 'Disk group [{#NAME}]: Average response time: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for read and write operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: f99ce5e6e31140c298ee447d3a2b8c4d
+ name: 'Disk group [{#NAME}]: Average response time: Write'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: s
+ description: 'Average response time for all write operations, calculated over the interval since these statistics were last requested or reset.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''avg-write-rsp-time''].first()'
+ -
+ type: MULTIPLIER
+ parameters:
+ - '0.000001'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 705fce660a944a47ad7ff0e9c9b1d37e
+ name: 'Disk group [{#NAME}]: Blocks free'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.blocks["{#NAME}",free]'
+ delay: '0'
+ history: 7d
+ description: 'Free space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''freespace-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 27e3fc79212e407ca1ae5fb06557440d
+ name: 'Disk group [{#NAME}]: Blocks size'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.blocks["{#NAME}",size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The size of a block, in bytes.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''blocksize''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: f14e651fd9dc4b03bb00e5db780f0114
+ name: 'Disk group [{#NAME}]: Blocks total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.blocks["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ description: 'Total space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''blocks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: ecd3de6d32e94d2ab50111659147c97e
+ name: 'Disk group [{#NAME}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 28b236ea619f4130a3271459e9fce06b
+ name: 'Disk group [{#NAME}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 51ef802067c149bea1d5d976df6e3a6f
+ name: 'Disk group [{#NAME}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 95925d6d4af94964b388208ff185642d
+ name: 'Disk group [{#NAME}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.read["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: c9fdf59576554063b404d190ad90db18
+ name: 'Disk group [{#NAME}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 31f5b13a56704e438b600df70c37a1fd
+ name: 'Disk group [{#NAME}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.iops.write["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-group-statistics''][?(@[''name''] == "{#NAME}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 7359b1d550734d30bb83612538b36e95
+ name: 'Disk group [{#NAME}]: RAID type'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.raid["{#NAME}",type]'
+ delay: '0'
+ history: 7d
+ description: 'The RAID level of the disk group.'
+ valuemap:
+ name: 'RAID type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''raidtype-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: b0d49d3da6b14b9cb8eeb95a3665a26e
+ name: 'Disk group [{#NAME}]: Space free'
+ type: CALCULATED
+ key: 'hpe.msa.disks.groups.space["{#NAME}",free]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.disks.groups.blocks["{#NAME}",size])*last(//hpe.msa.disks.groups.blocks["{#NAME}",free])'
+ description: 'The free space in the disk group.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: bc8e6e0fb286466593186708cddf3b2a
+ name: 'Disk group [{#NAME}]: Pool space used'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups.space["{#NAME}",pool_util]'
+ delay: '0'
+ history: 7d
+ units: '%'
+ description: 'The percentage of pool capacity that the disk group occupies.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''pool-percentage''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: fb3dd5308c97446693932206be17ace3
+ name: 'Disk group [{#NAME}]: Space total'
+ type: CALCULATED
+ key: 'hpe.msa.disks.groups.space["{#NAME}",total]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.disks.groups.blocks["{#NAME}",size])*last(//hpe.msa.disks.groups.blocks["{#NAME}",total])'
+ description: 'The capacity of the disk group.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 2a9c7f901f494b8bb7d2c74cd7c3030c
+ name: 'Disk group [{#NAME}]: Space utilization'
+ type: CALCULATED
+ key: 'hpe.msa.disks.groups.space["{#NAME}",util]'
+ history: 7d
+ value_type: FLOAT
+ units: '%'
+ params: '100-last(//hpe.msa.disks.groups.space["{#NAME}",free])/last(//hpe.msa.disks.groups.space["{#NAME}",total])*100'
+ description: 'The space utilization percentage in the disk group.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: df1af9dad6444821a86a26158469d0cb
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}'
+ name: 'Disk group [{#NAME}]: Disk group space is critically low'
+ event_name: 'Disk group [{#NAME}]: Disk group space is critically low (used > {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}%)'
+ priority: AVERAGE
+ description: 'Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}% available).'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 713960711c324dc780998f8f263344a2
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}'
+ name: 'Disk group [{#NAME}]: Disk group space is low'
+ event_name: 'Disk group [{#NAME}]: Disk group space is low (used > {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}%)'
+ priority: WARNING
+ description: 'Disk group is running low on free space (less than {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"}% available).'
+ dependencies:
+ -
+ name: 'Disk group [{#NAME}]: Disk group space is critically low'
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups.space["{#NAME}",util],5m)>{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}'
+ tags:
+ -
+ tag: scope
+ value: performance
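+ # Note (illustrative sketch, assuming hypothetical counter values): the calculated items above
+ # combine the dependent block counters, i.e.
+ #   space free  = blocks[size] * blocks[free]
+ #   space total = blocks[size] * blocks[total]
+ #   space util  = 100 - free/total*100
+ # For example, with blocksize = 512 B, free = 1,000,000 blocks and total = 4,000,000 blocks:
+ #   free = 512,000,000 B, total = 2,048,000,000 B, util = 100 - 25 = 75 %.
+ # The two trigger prototypes then compare min(util,5m) against the context macros
+ # {$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN:"{#NAME}"} and {$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT:"{#NAME}"}.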
+ -
+ uuid: 4c6bbdcdb05d45e0af52548aef4e8716
+ name: 'Disk group [{#NAME}]: Disks count'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",disk_count]'
+ delay: '0'
+ history: 7d
+ description: 'Number of disks in the disk group.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''diskcount''].first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ -
+ uuid: 97b6e0e2ec844636be64931fca6e2c6c
+ name: 'Disk group [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Disk group health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: component
+ value: health
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 7899b8a15b5042f3a4467a7cdee4c6ae
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=1'
+ name: 'Disk group [{#NAME}]: Disk group health is in degraded state'
+ priority: WARNING
+ description: 'Disk group health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: e7c6a3b20c424196854a5437aba4c3ec
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=2'
+ name: 'Disk group [{#NAME}]: Disk group health is in fault state'
+ priority: AVERAGE
+ description: 'Disk group health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 177dd9d1cfa54b3e8c9e6479cb96af03
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",health])=3'
+ name: 'Disk group [{#NAME}]: Disk group health is in unknown state'
+ priority: INFO
+ description: 'Disk group health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 6755c1253e83442780eeb31d67062980
+ name: 'Disk group [{#NAME}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.disks.groups["{#NAME}",status]'
+ delay: '0'
+ history: 7d
+ description: |
+ The status of the disk group:
+
+ - CRIT: Critical. The disk group is online but isn't fault tolerant because some of its disks are down.
+ - DMGD: Damaged. The disk group is online and fault tolerant, but some of its disks are damaged.
+ - FTDN: Fault tolerant with a down disk. The disk group is online and fault tolerant, but some of its disks are down.
+ - FTOL: Fault tolerant.
+ - MSNG: Missing. The disk group is online and fault tolerant, but some of its disks are missing.
+ - OFFL: Offline. Either the disk group is using offline initialization, or its disks are down and data may be lost.
+ - QTCR: Quarantined critical. The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.
+ - QTDN: Quarantined with a down disk. The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.
+ - QTOF: Quarantined offline. The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.
+ - QTUN: Quarantined unsupported. The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.
+ - STOP: The disk group is stopped.
+ - UNKN: Unknown.
+ - UP: Up. The disk group is online and does not have fault-tolerant attributes.
+ valuemap:
+ name: 'Disk group status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups''][?(@[''name''] == "{#NAME}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: disk-group
+ -
+ tag: component
+ value: health
+ -
+ tag: disk-group
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 8deee88d964846598d5574d197694b17
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=9'
+ name: 'Disk group [{#NAME}]: Disk group has damaged disks'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are damaged.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: c615e1bb1c824e7ba109b8a6580eb9b9
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=8'
+ name: 'Disk group [{#NAME}]: Disk group has missing disks'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are missing.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: bead3a0bb95342b3b3ceae7becff99b8
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=1'
+ name: 'Disk group [{#NAME}]: Disk group is fault tolerant with a down disk'
+ priority: AVERAGE
+ description: 'The disk group is online and fault tolerant, but some of its disks are down.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: c89466e00c2b40c1933fde60332a428a
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=3'
+ name: 'Disk group [{#NAME}]: Disk group is offline'
+ priority: AVERAGE
+ description: 'Either the disk group is using offline initialization, or its disks are down and data may be lost.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 3dc5b3bc1128451491217639cf4e5115
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=4'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined critical'
+ priority: AVERAGE
+ description: 'The disk group is critical with at least one inaccessible disk. For example, two disks are inaccessible in a RAID 6 disk group or one disk is inaccessible for other fault-tolerant RAID levels. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 6892c8c05331497ab37db2b2fe3673a1
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined offline'
+ priority: AVERAGE
+ description: 'The disk group is offline with multiple inaccessible disks causing user data to be incomplete, or is an NRAID or RAID 0 disk group.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 8a8bda977e11462a906fd200f1b67a72
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=5'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined unsupported'
+ priority: AVERAGE
+ description: 'The disk group contains data in a format that is not supported by this system. For example, this system does not support linear disk groups.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 21f06dd8f8de49f58a64a638d24ff905
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=6'
+ name: 'Disk group [{#NAME}]: Disk group is quarantined with an inaccessible disk'
+ priority: AVERAGE
+ description: 'The RAID 6 disk group has one inaccessible disk. The disk group is fault tolerant but degraded. If the inaccessible disks come online or if after 60 seconds from being quarantined the disk group is QTCR or QTDN, the disk group is automatically dequarantined.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 1914fede726744829b2e41392b957857
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=7'
+ name: 'Disk group [{#NAME}]: Disk group is stopped'
+ priority: AVERAGE
+ description: 'The disk group is stopped.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: a79a6cf86bd44f55a7859808f632bf48
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.disks.groups["{#NAME}",status])=2'
+ name: 'Disk group [{#NAME}]: Disk group status is critical'
+ priority: AVERAGE
+ description: 'The disk group is online but isn''t fault tolerant because some of its disks are down.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ graph_prototypes:
+ -
+ uuid: e1f7331965524670b8c44c0b0d8eb99b
+ name: 'Disk group [{#NAME}]: Average response time'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",read]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.avg_rsp_time["{#NAME}",write]'
+ -
+ uuid: 1354b947316a46be8dc696c29f408a6b
+ name: 'Disk group [{#NAME}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.data_transfer.reads["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.data_transfer.writes["{#NAME}",rate]'
+ -
+ uuid: f7f556011add4cd6b0fe8e4545c607a0
+ name: 'Disk group [{#NAME}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.iops.read["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.iops.write["{#NAME}",rate]'
+ -
+ uuid: 495a941dc4ef45e8b60d6a94bb1fbdcd
+ name: 'Disk group [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.space["{#NAME}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.disks.groups.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''disk-groups'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
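+ # Note (hypothetical payload, for illustration only): every item prototype in this rule is
+ # DEPENDENT on the hpe.msa.data.get master item. The rule extracts $.['disk-groups'] and creates
+ # one {#NAME} per array element, and each prototype then filters the same document by name.
+ # A fragment such as
+ #   {"disk-groups":[{"name":"dgA01","health-numeric":0,"freespace-numeric":1000000}]}
+ # would resolve hpe.msa.disks.groups["dgA01",health] through
+ #   $.['disk-groups'][?(@['name'] == "dgA01")].['health-numeric'].first()
+ # (field names are taken from the JSONPaths above; the values are made up).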
+ -
+ uuid: 5a97871f702348dca7a5378885087ea8
+ name: 'Enclosures discovery'
+ type: DEPENDENT
+ key: hpe.msa.enclosures.discovery
+ delay: '0'
+ description: 'Discover enclosures.'
+ item_prototypes:
+ -
+ uuid: 2e70432b3c324ecdb78ab77e5f9bbaf3
+ name: 'Enclosure [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Enclosure health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: component
+ value: health
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: d15d460b8c924f609f5cdd055060f8ce
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=1'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in degraded state'
+ priority: WARNING
+ description: 'Enclosure health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 7c2f6a7efbf245298c3ee0b137718dc8
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=2'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in fault state'
+ priority: AVERAGE
+ description: 'Enclosure health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 6732ced099d748daa5cbdf6d97580efd
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",health])=3'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure health is in unknown state'
+ priority: INFO
+ description: 'Enclosure health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: e3a1c5f6dee545a8a7d4b68768d060ab
+ name: 'Enclosure [{#DURABLE.ID}]: Midplane serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",midplane_serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Midplane serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''midplane-serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 1dcecf03b9814aac9749badf800e4717
+ name: 'Enclosure [{#DURABLE.ID}]: Model'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Enclosure model.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''model''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 89f11d7bf0e24a92bf4d4b4b1d86af58
+ name: 'Enclosure [{#DURABLE.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Enclosure part number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: b426baf09f1445eda59abd0e2ee6dd2c
+ name: 'Enclosure [{#DURABLE.ID}]: Power'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",power]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: W
+ description: 'Enclosure power in watts.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''enclosure-power''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 602b941548ab417bbe59f3f298bf6da9
+ name: 'Enclosure [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.enclosures["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Enclosure status.'
+ valuemap:
+ name: 'Enclosure status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '6'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: enclosure
+ -
+ tag: component
+ value: health
+ -
+ tag: enclosure
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: ef763c350b2e4d20bdecbe50703ec8dd
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=2'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has critical status'
+ priority: HIGH
+ description: 'Enclosure has critical status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: e3a7198f287e4600a0abfa929ee183de
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=6'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has unknown status'
+ priority: INFO
+ description: 'Enclosure has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: 27ba4d2474604caaa2712222cf621294
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=3'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure has warning status'
+ priority: WARNING
+ description: 'Enclosure has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 53b61c7521d94161b063a5ea506b5466
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=7'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure is unavailable'
+ priority: HIGH
+ description: 'Enclosure is unavailable.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 2218d1bf55aa4db0968dab804c0687e3
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.enclosures["{#DURABLE.ID}",status])=4'
+ name: 'Enclosure [{#DURABLE.ID}]: Enclosure is unrecoverable'
+ priority: HIGH
+ description: 'Enclosure is unrecoverable.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''enclosures'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 9043169f17de44baa174459b560de4f5
+ name: 'Fans discovery'
+ type: DEPENDENT
+ key: hpe.msa.fans.discovery
+ delay: '0'
+ description: 'Discover fans.'
+ item_prototypes:
+ -
+ uuid: f9be9af4ff9047f1af946313df3e7165
+ name: 'Fan [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Fan health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: component
+ value: health
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 3ee1b1d0d6b34c8eba02480e9e4d5be2
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=1'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in degraded state'
+ priority: WARNING
+ description: 'Fan health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 3e3785f9915d46068ebe2eff21bac813
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=2'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in fault state'
+ priority: AVERAGE
+ description: 'Fan health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 4bf2e519b5484d338f997ea5dac462e0
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",health])=3'
+ name: 'Fan [{#DURABLE.ID}]: Fan health is in unknown state'
+ priority: INFO
+ description: 'Fan health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: f028a919d56b45129f9ead200519adaa
+ name: 'Fan [{#DURABLE.ID}]: Speed'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",speed]'
+ delay: '0'
+ history: 7d
+ units: '!RPM'
+ description: 'Fan speed (revolutions per minute).'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''speed''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ -
+ uuid: df1d8af5df104afc829b403aec6efc96
+ name: 'Fan [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.fans["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Fan status.'
+ valuemap:
+ name: 'Fan status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fan
+ -
+ tag: component
+ value: health
+ -
+ tag: fan
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 183a1e1c4d444c9a8189035a2af22dc1
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=1'
+ name: 'Fan [{#DURABLE.ID}]: Fan has error status'
+ priority: AVERAGE
+ description: 'Fan has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 4d9e3d1bb22444f981295df07f0d9c24
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=3'
+ name: 'Fan [{#DURABLE.ID}]: Fan is missing'
+ priority: INFO
+ description: 'Fan is missing.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: a6e4ea796b98432284a9fd9fff1d82f9
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.fans["{#DURABLE.ID}",status])=2'
+ name: 'Fan [{#DURABLE.ID}]: Fan is off'
+ priority: WARNING
+ description: 'Fan is off.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ graph_prototypes:
+ -
+ uuid: 1def9fd4627d4552bf34e8ce35f3cd46
+ name: 'Fan [{#DURABLE.ID}]: Speed'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.fans["{#DURABLE.ID}",speed]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''fans'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 30f91e8f7fba489aa649759219efa67c
+ name: 'FRU discovery'
+ type: DEPENDENT
+ key: hpe.msa.frus.discovery
+ delay: '0'
+ filter:
+ conditions:
+ -
+ macro: '{#TYPE}'
+ value: ^(POWER_SUPPLY|RAID_IOM|CHASSIS_MIDPLANE)$
+ operator: NOT_MATCHES_REGEX
+ formulaid: A
+ description: 'Discover FRU.'
+ item_prototypes:
+ -
+ uuid: 8cbf62d188084ea4a72eaa37987d8d8e
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: '{#DESCRIPTION}. Part number of the FRU.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ -
+ uuid: 49c52c2c5b174c78a60756eb7a9e34f1
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: '{#DESCRIPTION}. FRU serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ -
+ uuid: d72f7be111ae4335b92d6a1d0ad9e3ee
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status]'
+ delay: '0'
+ history: 7d
+ description: |
+ {#DESCRIPTION}. FRU status:
+
+ Absent: The FRU is not present.
+ Fault: The FRU's health is Degraded or Fault.
+ Invalid data: The FRU ID data is invalid. The FRU's EEPROM is improperly programmed.
+ OK: The FRU is operating normally.
+ Power off: The FRU is powered off.
+ valuemap:
+ name: 'FRU status'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus''][?(@[''name''] == "{#TYPE}" && @[''fru-location''] == "{#LOCATION}")].[''fru-status-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: fru
+ -
+ tag: component
+ value: health
+ -
+ tag: fru
+ value: 'Enclosure {#ENCLOSURE.ID}: {#LOCATION}'
+ trigger_prototypes:
+ -
+ uuid: 2533eb2e4344494d9ec72629dab7b1a8
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=0'
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU ID data is invalid'
+ priority: WARNING
+ description: 'The FRU ID data is invalid. The FRU''s EEPROM is improperly programmed.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 7a994469e45f467c8582c24258d0eb75
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.frus["{#ENCLOSURE.ID}:{#LOCATION}",status])=1'
+ name: 'FRU [{#ENCLOSURE.ID}: {#LOCATION}]: FRU status is Degraded or Fault'
+ priority: AVERAGE
+ description: 'FRU status is Degraded or Fault.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DESCRIPTION}'
+ path: '$.[''description'']'
+ -
+ lld_macro: '{#ENCLOSURE.ID}'
+ path: '$.[''enclosure-id'']'
+ -
+ lld_macro: '{#LOCATION}'
+ path: '$.[''fru-location'']'
+ -
+ lld_macro: '{#TYPE}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''frus'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
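+ # Note (illustrative): the filter on this discovery rule keeps only rows whose {#TYPE} does not
+ # match ^(POWER_SUPPLY|RAID_IOM|CHASSIS_MIDPLANE)$. A hypothetical FRU row with name "MEMORY_CARD"
+ # would therefore get prototypes, while a "POWER_SUPPLY" row is dropped here (power supplies are
+ # handled by their own discovery rule below).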
+ -
+ uuid: 178b94ddcab947ffb1614622c2b7e08e
+ name: 'Pools discovery'
+ type: DEPENDENT
+ key: hpe.msa.pools.discovery
+ delay: '0'
+ description: 'Discover pools.'
+ item_prototypes:
+ -
+ uuid: 09d67b3577af4e21a7bbd09078d705cd
+ name: 'Pool [{#NAME}]: Blocks available'
+ type: DEPENDENT
+ key: 'hpe.msa.pools.blocks["{#NAME}",available]'
+ delay: '0'
+ history: 7d
+ description: 'Available space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''total-avail-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: 076921fcd93941b09b79c7d44873417d
+ name: 'Pool [{#NAME}]: Blocks size'
+ type: DEPENDENT
+ key: 'hpe.msa.pools.blocks["{#NAME}",size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The size of a block, in bytes.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''blocksize''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: fd29559e5bb3455b8b4cfe56f75f54b2
+ name: 'Pool [{#NAME}]: Blocks total'
+ type: DEPENDENT
+ key: 'hpe.msa.pools.blocks["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ description: 'Total space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''total-size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: d99eeba76b354e73b0118b46402d93bf
+ name: 'Pool [{#NAME}]: Space free'
+ type: CALCULATED
+ key: 'hpe.msa.pools.space["{#NAME}",free]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.pools.blocks["{#NAME}",size])*last(//hpe.msa.pools.blocks["{#NAME}",available])'
+ description: 'The free space in the pool.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: 9fce545fbe724da28a13b8ca8759c37d
+ name: 'Pool [{#NAME}]: Space total'
+ type: CALCULATED
+ key: 'hpe.msa.pools.space["{#NAME}",total]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.pools.blocks["{#NAME}",size])*last(//hpe.msa.pools.blocks["{#NAME}",total])'
+ description: 'The capacity of the pool.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ -
+ uuid: ad9bdb342a494d82a36e42e75a3bbf3e
+ name: 'Pool [{#NAME}]: Space utilization'
+ type: CALCULATED
+ key: 'hpe.msa.pools.space["{#NAME}",util]'
+ history: 7d
+ value_type: FLOAT
+ units: '%'
+ params: '100-last(//hpe.msa.pools.space["{#NAME}",free])/last(//hpe.msa.pools.space["{#NAME}",total])*100'
+ description: 'The space utilization percentage in the pool.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: c73b4a77e94a43f5951f6a541d65637e
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}'
+ name: 'Pool [{#NAME}]: Pool space is critically low'
+ event_name: 'Pool [{#NAME}]: Pool space is critically low (used > {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}%)'
+ priority: AVERAGE
+ description: 'Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}% available).'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: c7644beb62bc40e99d6045af6d4bc16f
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}'
+ name: 'Pool [{#NAME}]: Pool space is low'
+ event_name: 'Pool [{#NAME}]: Pool space is low (used > {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}%)'
+ priority: WARNING
+ description: 'Pool is running low on free space (less than {$HPE.MSA.POOL.PUSED.MAX.WARN:"{#NAME}"}% available).'
+ dependencies:
+ -
+ name: 'Pool [{#NAME}]: Pool space is critically low'
+ expression: 'min(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools.space["{#NAME}",util],5m)>{$HPE.MSA.POOL.PUSED.MAX.CRIT:"{#NAME}"}'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 15096639cae947d383a506f0332ff6d3
+ name: 'Pool [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.pools["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Pool health.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools''][?(@[''name''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: pool
+ -
+ tag: pool
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 20723e93add44447a5cab3c8cc4849a6
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=1'
+ name: 'Pool [{#NAME}]: Pool health is in degraded state'
+ priority: WARNING
+ description: 'Pool health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 1881bd0efca04c58a56effb8e232e734
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=2'
+ name: 'Pool [{#NAME}]: Pool health is in fault state'
+ priority: AVERAGE
+ description: 'Pool health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 62db05047b5a4b8797eee5667bb3bdf4
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.pools["{#NAME}",health])=3'
+ name: 'Pool [{#NAME}]: Pool health is in unknown state'
+ priority: INFO
+ description: 'Pool [{#NAME}] health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ graph_prototypes:
+ -
+ uuid: 93151c5760fb405498d1df049185ffe7
+ name: 'Pool [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.pools.space["{#NAME}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.pools.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''pools'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: bed52618dbc6498f99ddeedc78c0cdad
+ name: 'Ports discovery'
+ type: DEPENDENT
+ key: hpe.msa.ports.discovery
+ delay: '0'
+ description: 'Discover ports.'
+ item_prototypes:
+ -
+ uuid: cf4f9aaf55e6435d949d3b5074b9f37f
+ name: 'Port [{#NAME}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Port health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 9775011d59a846669087e6c90c4a011a
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=1'
+ name: 'Port [{#NAME}]: Port health is in degraded state'
+ priority: WARNING
+ description: 'Port health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: a5dec537528f42e0948ea15f1a290f26
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=2'
+ name: 'Port [{#NAME}]: Port health is in fault state'
+ priority: AVERAGE
+ description: 'Port health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 7025a0e6c93e4731be966c2a9e774581
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",health])=3'
+ name: 'Port [{#NAME}]: Port health is in unknown state'
+ priority: INFO
+ description: 'Port health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: cab1cd26264d408998c5ea8737571ed4
+ name: 'Port [{#NAME}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Port status.'
+ valuemap:
+ name: Status
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: c1d2f824a3d4470abb6817753b1d4047
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=2'
+ name: 'Port [{#NAME}]: Port has error status'
+ priority: AVERAGE
+ description: 'Port has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 6083cdfcb59848a6b5249147155996c2
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=4'
+ name: 'Port [{#NAME}]: Port has unknown status'
+ priority: INFO
+ description: 'Port has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: dd32b960ce1544d880d94b2da4dba03e
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.ports["{#NAME}",status])=1'
+ name: 'Port [{#NAME}]: Port has warning status'
+ priority: WARNING
+ description: 'Port has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: 32ad6655625e408a9dd577624afbfa6a
+ name: 'Port [{#NAME}]: Type'
+ type: DEPENDENT
+ key: 'hpe.msa.ports["{#NAME}",type]'
+ delay: '0'
+ history: 7d
+ description: 'Port type.'
+ valuemap:
+ name: 'Port type'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports''][?(@[''port''] == "{#NAME}")].[''port-type-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NAME}'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''port'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''ports'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 1561695bd2174eada622a0d90ee1c3df
+ name: 'Power supplies discovery'
+ type: DEPENDENT
+ key: hpe.msa.power_supplies.discovery
+ delay: '0'
+ description: 'Discover power supplies.'
+ item_prototypes:
+ -
+ uuid: 993bc2db3b444dc5bc37794985e63ea9
+ name: 'Power supply [{#DURABLE.ID}]: Health'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",health]'
+ delay: '0'
+ history: 7d
+ description: 'Power supply health status.'
+ valuemap:
+ name: Health
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''health-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 1b512fda735440b5839a63fd26c19535
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=1'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in degraded state'
+ priority: WARNING
+ description: 'Power supply health is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ -
+ uuid: b75fb541ae0e43cc9cdb86e07dc3e394
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=2'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in fault state'
+ priority: AVERAGE
+ description: 'Power supply health is in fault state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 555ee9ef33b54d029df2f17d5f899539
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",health])=3'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply health is in unknown state'
+ priority: INFO
+ description: 'Power supply health is in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: efae55cfdd1e4021a623e2128f988611
+ name: 'Power supply [{#DURABLE.ID}]: Part number'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",part_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Power supply part number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''part-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ -
+ uuid: 6716c3d0177247fe8a35fa1eb206a54f
+ name: 'Power supply [{#DURABLE.ID}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Power supply serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''serial-number''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ -
+ uuid: a3ff6ab5576246fe9e794e01df4fe1b9
+ name: 'Power supply [{#DURABLE.ID}]: Status'
+ type: DEPENDENT
+ key: 'hpe.msa.power_supplies["{#DURABLE.ID}",status]'
+ delay: '0'
+ history: 7d
+ description: 'Power supply status.'
+ valuemap:
+ name: Status
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies''][?(@[''durable-id''] == "{#DURABLE.ID}")].[''status-numeric''].first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '4'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: power-supply
+ -
+ tag: power-supply
+ value: '{#DURABLE.ID}'
+ trigger_prototypes:
+ -
+ uuid: 49c9d2d61c45476da5564299b2eebdee
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=2'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has error status'
+ priority: AVERAGE
+ description: 'Power supply has error status.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: d6cbaeb5aab84e5eb487af4bf319d640
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=4'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has unknown status'
+ priority: INFO
+ description: 'Power supply has unknown status.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: b7e85e7a6c254aba930d7704c58adf47
+ expression: 'last(/HPE MSA 2060 Storage by HTTP/hpe.msa.power_supplies["{#DURABLE.ID}",status])=1'
+ name: 'Power supply [{#DURABLE.ID}]: Power supply has warning status'
+ priority: WARNING
+ description: 'Power supply has warning status.'
+ tags:
+ -
+ tag: scope
+ value: performance
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#DURABLE.ID}'
+ path: '$.[''durable-id'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''power-supplies'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: b132a010c8a84da79eee1ba725301be9
+ name: 'Volumes discovery'
+ type: DEPENDENT
+ key: hpe.msa.volumes.discovery
+ delay: '0'
+ description: 'Discover volumes.'
+ item_prototypes:
+ -
+ uuid: cc6c4bddc05243c7a90082a3450a76a7
+ name: 'Volume [{#NAME}]: Blocks allocated'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.blocks["{#NAME}",allocated]'
+ delay: '0'
+ history: 7d
+ description: 'The number of blocks currently allocated to the volume.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes''][?(@[''volume-name''] == "{#NAME}")].[''allocated-size-numeric''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 900d94185fa9480590915bbafb8ccda0
+ name: 'Volume [{#NAME}]: Blocks size'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.blocks["{#NAME}",size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'The size of a block, in bytes.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes''][?(@[''volume-name''] == "{#NAME}")].[''blocksize''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 5cdae787c8b6485899f8f4e8c3cf6b71
+ name: 'Volume [{#NAME}]: Blocks total'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.blocks["{#NAME}",total]'
+ delay: '0'
+ history: 7d
+ description: 'Total space in blocks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes''][?(@[''volume-name''] == "{#NAME}")].[''blocks''].first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: b7615bb6a3434303a2bb4751e7aed458
+ name: 'Volume [{#NAME}]: Cache: Read hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.read.hits["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''read-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 655e319736804d8db4b6988f7205c5e3
+ name: 'Volume [{#NAME}]: Cache: Read misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.read.misses["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times the block to be read is not found in cache per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''read-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 849ef4370f4b46ea894d2a2e1e4a3ea4
+ name: 'Volume [{#NAME}]: Cache: Write hits, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.write.hits["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times per second that the block written to is found in cache.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''write-cache-hits''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 593f4fce31f24e9b99c9bc69d2ead38b
+ name: 'Volume [{#NAME}]: Cache: Write misses, rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.cache.write.misses["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ description: 'For the controller that owns the volume, the number of times per second that the block written to is not found in cache.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''write-cache-misses''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 1a810fe32e464e8cbcdfc61769bc7869
+ name: 'Volume [{#NAME}]: Data transfer rate: Reads'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data read rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''data-read-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 4dd1d47335a9425a94ffcee4c8ed2216
+ name: 'Volume [{#NAME}]: Data transfer rate: Total'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: Bps
+ description: 'The data transfer rate, in bytes per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''bytes-per-second-numeric''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: d5198b50ba8f4db1aa160d0208540a74
+ name: 'Volume [{#NAME}]: Data transfer rate: Writes'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: Bps
+ description: 'The data write rate, in bytes per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''data-written-numeric''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 00f5c3f9d19d450e999c389ba297fb41
+ name: 'Volume [{#NAME}]: IOPS, read rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.read["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!r/s'
+ description: 'Number of read operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''number-of-reads''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: b925122eda0c4c1380b843bc764ed122
+ name: 'Volume [{#NAME}]: IOPS, total rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.total["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ units: '!iops'
+ description: 'Total input/output operations per second, calculated over the interval since these statistics were last requested or reset. This value will be zero if it has not been requested or reset since a controller restart.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''iops''].first()'
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: a9fcc1525204489cad52cf4e88518064
+ name: 'Volume [{#NAME}]: IOPS, write rate'
+ type: DEPENDENT
+ key: 'hpe.msa.volumes.iops.write["{#NAME}",rate]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ units: '!w/s'
+ description: 'Number of write operations per second.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volume-statistics''][?(@[''volume-name''] == "{#NAME}")].[''number-of-writes''].first()'
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ master_item:
+ key: hpe.msa.data.get
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 860855a80c554e0685d4d4125342b547
+ name: 'Volume [{#NAME}]: Space allocated'
+ type: CALCULATED
+ key: 'hpe.msa.volumes.space["{#NAME}",allocated]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.volumes.blocks["{#NAME}",size])*last(//hpe.msa.volumes.blocks["{#NAME}",allocated])'
+ description: 'The amount of space currently allocated to the volume.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: eb09d8791bb84c8aadf5cdcac3d76413
+ name: 'Volume [{#NAME}]: Space total'
+ type: CALCULATED
+ key: 'hpe.msa.volumes.space["{#NAME}",total]'
+ history: 7d
+ units: B
+ params: 'last(//hpe.msa.volumes.blocks["{#NAME}",size])*last(//hpe.msa.volumes.blocks["{#NAME}",total])'
+ description: 'The capacity of the volume.'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ tags:
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ graph_prototypes:
+ -
+ uuid: 8905b826b774473991f74b927716322e
+ name: 'Volume [{#NAME}]: Cache usage'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.read.hits["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.read.misses["{#NAME}",rate]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.write.hits["{#NAME}",rate]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.cache.write.misses["{#NAME}",rate]'
+ -
+ uuid: 1bd9df7bab9c4f3a978810c82cc61f42
+ name: 'Volume [{#NAME}]: Data transfer rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.data_transfer.reads["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.data_transfer.writes["{#NAME}",rate]'
+ -
+ uuid: 24dfc70c5d724f13ac1cec6b229c7fe9
+ name: 'Volume [{#NAME}]: Disk operations rate'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.iops.read["{#NAME}",rate]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.iops.write["{#NAME}",rate]'
+ -
+ uuid: 5a316cdf8c6f42acb3cb7a158861145a
+ name: 'Volume [{#NAME}]: Space utilization'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.space["{#NAME}",allocated]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE MSA 2060 Storage by HTTP'
+ key: 'hpe.msa.volumes.space["{#NAME}",total]'
+ master_item:
+ key: hpe.msa.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#NAME}'
+ path: '$.[''volume-name'']'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.[''volumes'']'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ tags:
+ -
+ tag: class
+ value: storage
+ -
+ tag: target
+ value: hpe
+ -
+ tag: target
+ value: msa-2060
+ macros:
+ -
+ macro: '{$HPE.MSA.API.PASSWORD}'
+ type: SECRET_TEXT
+ description: 'Specify password for API.'
+ -
+ macro: '{$HPE.MSA.API.PORT}'
+ value: '443'
+ description: 'Connection port for API.'
+ -
+ macro: '{$HPE.MSA.API.SCHEME}'
+ value: https
+ description: 'Connection scheme for API.'
+ -
+ macro: '{$HPE.MSA.API.USERNAME}'
+ value: zabbix
+ description: 'Specify user name for API.'
+ -
+ macro: '{$HPE.MSA.CONTROLLER.CPU.UTIL.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the CPU utilization in %.'
+ -
+ macro: '{$HPE.MSA.DATA.TIMEOUT}'
+ value: 30s
+ description: 'Response timeout for API.'
+ -
+ macro: '{$HPE.MSA.DISKS.GROUP.PUSED.MAX.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the disk group space utilization in %.'
+ -
+ macro: '{$HPE.MSA.DISKS.GROUP.PUSED.MAX.WARN}'
+ value: '80'
+ description: 'The warning threshold of the disk group space utilization in %.'
+ -
+ macro: '{$HPE.MSA.POOL.PUSED.MAX.CRIT}'
+ value: '90'
+ description: 'The critical threshold of the pool space utilization in %.'
+ -
+ macro: '{$HPE.MSA.POOL.PUSED.MAX.WARN}'
+ value: '80'
+ description: 'The warning threshold of the pool space utilization in %.'
+ valuemaps:
+ -
+ uuid: f7af1259f3c54a5faa040c743d386d1d
+ name: 'Controller status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Operational
+ -
+ value: '1'
+ newvalue: Down
+ -
+ value: '2'
+ newvalue: 'Not Installed'
+ -
+ uuid: 6bb0dfe12f4249ef9f4b804885c70c60
+ name: 'Disk group status'
+ mappings:
+ -
+ value: '0'
+ newvalue: FTOL
+ -
+ value: '1'
+ newvalue: FTDN
+ -
+ value: '2'
+ newvalue: CRIT
+ -
+ value: '3'
+ newvalue: OFFL
+ -
+ value: '4'
+ newvalue: QTCR
+ -
+ value: '5'
+ newvalue: QTOF
+ -
+ value: '6'
+ newvalue: QTDN
+ -
+ value: '7'
+ newvalue: STOP
+ -
+ value: '8'
+ newvalue: MSNG
+ -
+ value: '9'
+ newvalue: DMGD
+ -
+ value: '11'
+ newvalue: QTDN
+ -
+ value: '250'
+ newvalue: UP
+ -
+ uuid: de0e7d801a9b42cf80fe4c71c0eed982
+ name: 'Disk temperature status'
+ mappings:
+ -
+ value: '1'
+ newvalue: OK
+ -
+ value: '2'
+ newvalue: Critical
+ -
+ value: '3'
+ newvalue: Warning
+ -
+ value: '4'
+ newvalue: Unknown
+ -
+ uuid: 10547e62c7bb4581b347bc523ef03582
+ name: 'Disk type'
+ mappings:
+ -
+ value: '4'
+ newvalue: SAS
+ -
+ value: '8'
+ newvalue: 'SSD SAS'
+ -
+ value: '11'
+ newvalue: 'SAS MDL'
+ -
+ uuid: 37317f19f7d74b8fa61dd1b28e6f4d42
+ name: 'Enclosure status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Unsupported
+ -
+ value: '1'
+ newvalue: OK
+ -
+ value: '2'
+ newvalue: Critical
+ -
+ value: '3'
+ newvalue: Warning
+ -
+ value: '4'
+ newvalue: Unrecoverable
+ -
+ value: '5'
+ newvalue: 'Not installed'
+ -
+ value: '6'
+ newvalue: Unknown
+ -
+ value: '7'
+ newvalue: Unavailable
+ -
+ uuid: 1acc14c82fba4c3daa207d0ce9b702f2
+ name: 'Fan status'
+ mappings:
+ -
+ value: '0'
+ newvalue: Up
+ -
+ value: '1'
+ newvalue: Error
+ -
+ value: '2'
+ newvalue: 'Off'
+ -
+ value: '3'
+ newvalue: Missing
+ -
+ uuid: 284ed898fb7c46ecb8d719646445264c
+ name: 'FRU status'
+ mappings:
+ -
+ value: '0'
+ newvalue: 'Invalid data'
+ -
+ value: '1'
+ newvalue: Fault
+ -
+ value: '2'
+ newvalue: Absent
+ -
+ value: '3'
+ newvalue: 'Power off'
+ -
+ value: '4'
+ newvalue: OK
+ -
+ uuid: cb8c3d00dfd4456181765b8b350ea4d2
+ name: Health
+ mappings:
+ -
+ value: '0'
+ newvalue: OK
+ -
+ value: '1'
+ newvalue: Degraded
+ -
+ value: '2'
+ newvalue: Fault
+ -
+ value: '3'
+ newvalue: Unknown
+ -
+ value: '4'
+ newvalue: N/A
+ -
+ uuid: ec101e7d212747779ed56ef9dbf72e2b
+ name: 'Port type'
+ mappings:
+ -
+ value: '0'
+ newvalue: Unknown
+ -
+ value: '6'
+ newvalue: FC
+ -
+ value: '8'
+ newvalue: SAS
+ -
+ value: '9'
+ newvalue: iSCSI
+ -
+ uuid: 171c9abf20514b0fb78d532bd987881b
+ name: 'RAID type'
+ mappings:
+ -
+ value: '0'
+ newvalue: RAID0
+ -
+ value: '1'
+ newvalue: RAID1
+ -
+ value: '2'
+ newvalue: MSA-DP+
+ -
+ value: '5'
+ newvalue: RAID5
+ -
+ value: '6'
+ newvalue: NRAID
+ -
+ value: '10'
+ newvalue: RAID10
+ -
+ value: '11'
+ newvalue: RAID6
+ -
+ uuid: 402b0dacf14a4436b0d3cfe237bf1e86
+ name: Status
+ mappings:
+ -
+ value: '0'
+ newvalue: Up
+ -
+ value: '1'
+ newvalue: Warning
+ -
+ value: '2'
+ newvalue: Error
+ -
+ value: '3'
+ newvalue: 'Not present'
+ -
+ value: '4'
+ newvalue: Unknown
+ -
+ value: '6'
+ newvalue: Disconnected
diff --git a/templates/san/hpe_primera_http/README.md b/templates/san/hpe_primera_http/README.md
new file mode 100644
index 00000000000..70db3114347
--- /dev/null
+++ b/templates/san/hpe_primera_http/README.md
@@ -0,0 +1,189 @@
+
+# HPE Primera by HTTP
+
+## Overview
+
+For Zabbix version: 6.0 and higher
+The template to monitor HPE Primera by HTTP.
+It works without any external scripts and uses the script item.
+
+This template was tested on:
+
+- HPE Primera, version 4.2.1.6
+
+## Setup
+
+> See [Zabbix template operation](https://www.zabbix.com/documentation/6.0/manual/config/templates_out_of_the_box/http) for basic instructions.
+
+1. Create the user `zabbix` on the storage with the browse role and enable it for all domains.
+2. The WSAPI server does not start automatically.
+ Log in to the CLI as Super, Service, or any role granted the wsapi_set right.
+ Start the WSAPI server with the command `startwsapi`.
+ To check the WSAPI state, use the command `showwsapi`.
+3. Link the template to the host.
+4. Configure the macros {$HPE.PRIMERA.API.USERNAME} and {$HPE.PRIMERA.API.PASSWORD}. A quick connectivity check is sketched below.
+
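+The following is a minimal, optional sketch for verifying WSAPI reachability before linking the template; the host name is a placeholder, and the `/api/v1/credentials` endpoint is assumed from the HPE 3PAR/Primera WSAPI documentation (verify against your WSAPI reference):
+
+```bash
+# Check that the WSAPI port answers (mirrors the "Service ping" item of this template):
+curl -ks -o /dev/null -w '%{http_code}\n' https://primera.example.com:443/
+
+# Optionally request a session key with the same credentials configured in the macros
+# (assumed endpoint; on success the response should contain a session key):
+curl -ks -X POST https://primera.example.com:443/api/v1/credentials \
+     -H 'Content-Type: application/json' \
+     -d '{"user":"zabbix","password":"<password>"}'
+```
+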
+## Zabbix configuration
+
+No specific Zabbix configuration is required.
+
+### Macros used
+
+|Name|Description|Default|
+|----|-----------|-------|
+|{$HPE.PRIMERA.API.PASSWORD} |<p>Specify password for WSAPI.</p> |`` |
+|{$HPE.PRIMERA.API.PORT} |<p>The WSAPI port.</p> |`443` |
+|{$HPE.PRIMERA.API.SCHEME} |<p>The WSAPI scheme (http/https).</p> |`https` |
+|{$HPE.PRIMERA.API.USERNAME} |<p>Specify user name for WSAPI.</p> |`zabbix` |
+|{$HPE.PRIMERA.CPG.NAME.MATCHES} |<p>This macro is used in filters of CPGs discovery rule.</p> |`.*` |
+|{$HPE.PRIMERA.CPG.NAME.NOT_MATCHES} |<p>This macro is used in filters of CPGs discovery rule.</p> |`CHANGE_IF_NEEDED` |
+|{$HPE.PRIMERA.DATA.TIMEOUT} |<p>Response timeout for WSAPI.</p> |`15s` |
+|{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.MATCHES} |<p>Filter of discoverable tasks by name.</p> |`CHANGE_IF_NEEDED` |
+|{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.NOT_MATCHES} |<p>Filter to exclude discovered tasks by name.</p> |`.*` |
+|{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.MATCHES} |<p>Filter of discoverable tasks by type.</p> |`.*` |
+|{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.NOT_MATCHES} |<p>Filter to exclude discovered tasks by type.</p> |`CHANGE_IF_NEEDED` |
+|{$HPE.PRIMERA.VOLUME.NAME.MATCHES} |<p>This macro is used in filters of volume discovery rule.</p> |`.*` |
+|{$HPE.PRIMERA.VOLUME.NAME.NOT_MATCHES} |<p>This macro is used in filters of volume discovery rule.</p> |`^(admin|.srdata|.mgmtdata)$` |
+
+## Template links
+
+There are no template links in this template.
+
+## Discovery rules
+
+|Name|Description|Type|Key and additional info|
+|----|-----------|----|----|
+|Common provisioning groups discovery |<p>List of CPGs resources.</p> |DEPENDENT |hpe.primera.cpg.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$HPE.PRIMERA.CPG.NAME.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$HPE.PRIMERA.CPG.NAME.NOT_MATCHES}`</p> |
+|Disks discovery |<p>List of physical disk resources.</p> |DEPENDENT |hpe.primera.disks.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|Hosts discovery |<p>List of host properties.</p> |DEPENDENT |hpe.primera.hosts.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#NAME} EXISTS</p> |
+|Ports discovery |<p>List of ports.</p> |DEPENDENT |hpe.primera.ports.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.ports.members`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#TYPE} NOT_MATCHES_REGEX `3`</p> |
+|Tasks discovery |<p>List of tasks started within last 24 hours.</p> |DEPENDENT |hpe.primera.tasks.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.tasks`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.NOT_MATCHES}`</p><p>- {#TYPE} MATCHES_REGEX `{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.MATCHES}`</p><p>- {#TYPE} NOT_MATCHES_REGEX `{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.NOT_MATCHES}`</p> |
+|Volumes discovery |<p>List of storage volume resources.</p> |DEPENDENT |hpe.primera.volumes.discovery<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>**Filter**:</p>AND <p>- {#NAME} MATCHES_REGEX `{$HPE.PRIMERA.VOLUME.NAME.MATCHES}`</p><p>- {#NAME} NOT_MATCHES_REGEX `{$HPE.PRIMERA.VOLUME.NAME.NOT_MATCHES}`</p> |
+
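+Discovery scope is controlled with the filter macros listed above. A minimal, hypothetical host-level override (the `tmp_.*` pattern is illustrative only) that keeps the default exclusions and also drops temporary volumes could look like this:
+
+```text
+{$HPE.PRIMERA.VOLUME.NAME.MATCHES}     = .*
+{$HPE.PRIMERA.VOLUME.NAME.NOT_MATCHES} = ^(admin|.srdata|.mgmtdata|tmp_.*)$
+```
+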
+## Items collected
+
+|Group|Name|Description|Type|Key and additional info|
+|-----|----|-----------|----|---------------------|
+|HPE |HPE Primera: Get data |<p>The JSON with result of WSAPI requests.</p> |SCRIPT |hpe.primera.data.get<p>**Expression**:</p>`The text is too long. Please see the template.` |
+|HPE |HPE Primera: Get errors |<p>A list of errors from WSAPI requests.</p> |DEPENDENT |hpe.primera.data.errors<p>**Preprocessing**:</p><p>- JSONPATH: `$.errors`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |HPE Primera: Capacity allocated |<p>Allocated capacity in the system.</p> |DEPENDENT |hpe.primera.system.capacity.allocated<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.allocatedCapacityMiB`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |HPE Primera: Chunklet size |<p>Chunklet size.</p> |DEPENDENT |hpe.primera.system.chunklet.size<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.chunkletSizeMiB`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |HPE Primera: System contact |<p>Contact of the system.</p> |DEPENDENT |hpe.primera.system.contact<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.contact`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |HPE Primera: Capacity failed |<p>Failed capacity in the system.</p> |DEPENDENT |hpe.primera.system.capacity.failed<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.failedCapacityMiB`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |HPE Primera: Capacity free |<p>Free capacity in the system.</p> |DEPENDENT |hpe.primera.system.capacity.free<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.freeCapacityMiB`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |HPE Primera: System location |<p>Location of the system.</p> |DEPENDENT |hpe.primera.system.location<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.location`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |HPE Primera: Model |<p>System model.</p> |DEPENDENT |hpe.primera.system.model<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.model`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |HPE Primera: System name |<p>System name.</p> |DEPENDENT |hpe.primera.system.name<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.name`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|HPE |HPE Primera: Serial number |<p>System serial number.</p> |DEPENDENT |hpe.primera.system.serial_number<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.serialNumber`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |HPE Primera: Software version number |<p>Storage system software version number.</p> |DEPENDENT |hpe.primera.system.sw_version<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.systemVersion`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |HPE Primera: Capacity total |<p>Total capacity in the system.</p> |DEPENDENT |hpe.primera.system.capacity.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.totalCapacityMiB`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |HPE Primera: Nodes total |<p>Total number of nodes in the system.</p> |DEPENDENT |hpe.primera.system.nodes.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.totalNodes`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |HPE Primera: Nodes online |<p>Number of online nodes in the system.</p> |DEPENDENT |hpe.primera.system.nodes.online<p>**Preprocessing**:</p><p>- JSONPATH: `$.system.onlineNodes.length()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |HPE Primera: Disks total |<p>Number of physical disks.</p> |DEPENDENT |hpe.primera.disks.total<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.total`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |HPE Primera: Service ping |<p>Checks if the service is running and accepting TCP connections.</p> |SIMPLE |net.tcp.service["{$HPE.PRIMERA.API.SCHEME}","{HOST.CONN}","{$HPE.PRIMERA.API.PORT}"]<p>**Preprocessing**:</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|HPE |CPG [{#NAME}]: Degraded state |<p>Detailed state of the CPG:</p><p>LDS_NOT_STARTED (1) - LDs not started.</p><p>NOT_STARTED (2) - VV not started.</p><p>NEEDS_CHECK (3) - check for consistency.</p><p>NEEDS_MAINT_CHECK (4) - maintenance check is required.</p><p>INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.</p><p>SNAPDATA_INVALID (6) - invalid snapshot data.</p><p>PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.</p><p>STALE (8) - parts of the VV contain old data because of a copy-on-write operation.</p><p>COPY_FAILED (9) - a promote or copy operation to this volume failed.</p><p>DEGRADED_AVAIL (10) - degraded due to availability.</p><p>DEGRADED_PERF (11) - degraded due to performance.</p><p>PROMOTING (12) - volume is the current target of a promote operation.</p><p>COPY_TARGET (13) - volume is the current target of a physical copy operation.</p><p>RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.</p><p>TUNING (15) - volume tuning is in progress.</p><p>CLOSING (16) - volume is closing.</p><p>REMOVING (17) - removing the volume.</p><p>REMOVING_RETRY (18) - retrying a volume removal operation.</p><p>CREATING (19) - creating a volume.</p><p>COPY_SOURCE (20) - copy source.</p><p>IMPORTING (21) - importing a volume.</p><p>CONVERTING (22) - converting a volume.</p><p>INVALID (23) - invalid.</p><p>EXCLUSIVE (24) - local storage system has exclusive access to the volume.</p><p>CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.</p><p>STANDBY (26) - volume in standby mode.</p><p>SD_META_INCONSISTENT (27) - SD Meta Inconsistent.</p><p>SD_NEEDS_FIX (28) - SD needs fix.</p><p>SD_META_FIXING (29) - SD meta fix.</p><p>UNKNOWN (999) - unknown state.</p><p>NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.</p> |DEPENDENT |hpe.primera.cpg.state["{#ID}",degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].degradedStates.first()`</p> |
+|HPE |CPG [{#NAME}]: Failed state |<p>Detailed state of the CPG:</p><p>LDS_NOT_STARTED (1) - LDs not started.</p><p>NOT_STARTED (2) - VV not started.</p><p>NEEDS_CHECK (3) - check for consistency.</p><p>NEEDS_MAINT_CHECK (4) - maintenance check is required.</p><p>INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.</p><p>SNAPDATA_INVALID (6) - invalid snapshot data.</p><p>PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.</p><p>STALE (8) - parts of the VV contain old data because of a copy-on-write operation.</p><p>COPY_FAILED (9) - a promote or copy operation to this volume failed.</p><p>DEGRADED_AVAIL (10) - degraded due to availability.</p><p>DEGRADED_PERF (11) - degraded due to performance.</p><p>PROMOTING (12) - volume is the current target of a promote operation.</p><p>COPY_TARGET (13) - volume is the current target of a physical copy operation.</p><p>RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.</p><p>TUNING (15) - volume tuning is in progress.</p><p>CLOSING (16) - volume is closing.</p><p>REMOVING (17) - removing the volume.</p><p>REMOVING_RETRY (18) - retrying a volume removal operation.</p><p>CREATING (19) - creating a volume.</p><p>COPY_SOURCE (20) - copy source.</p><p>IMPORTING (21) - importing a volume.</p><p>CONVERTING (22) - converting a volume.</p><p>INVALID (23) - invalid.</p><p>EXCLUSIVE (24) - local storage system has exclusive access to the volume.</p><p>CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.</p><p>STANDBY (26) - volume in standby mode.</p><p>SD_META_INCONSISTENT (27) - SD Meta Inconsistent.</p><p>SD_NEEDS_FIX (28) - SD needs fix.</p><p>SD_META_FIXING (29) - SD meta fix.</p><p>UNKNOWN (999) - unknown state.</p><p>NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.</p> |DEPENDENT |hpe.primera.cpg.state["{#ID}",failed]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].failedStates.first()`</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(value));`</p> |
+|HPE |CPG [{#NAME}]: CPG space: Free |<p>Free CPG space.</p> |DEPENDENT |hpe.primera.cpg.space["{#ID}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].freeSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Number of FPVVs |<p>Number of FPVVs (Fully Provisioned Virtual Volumes) allocated in the CPG.</p> |DEPENDENT |hpe.primera.cpg.fpvv["{#ID}",count]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].numFPVVs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |CPG [{#NAME}]: Number of TPVVs |<p>Number of TPVVs (Thinly Provisioned Virtual Volumes) allocated in the CPG.</p> |DEPENDENT |hpe.primera.cpg.tpvv["{#ID}",count]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].numTPVVs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |CPG [{#NAME}]: Number of TDVVs |<p>Number of TDVVs (Thinly Deduplicated Virtual Volume) created in the CPG.</p> |DEPENDENT |hpe.primera.cpg.tdvv["{#ID}",count]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].numTDVVs.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |CPG [{#NAME}]: Raw space: Free |<p>Raw free space.</p> |DEPENDENT |hpe.primera.cpg.space.raw["{#ID}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].rawFreeSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Raw space: Shared |<p>Raw shared space.</p> |DEPENDENT |hpe.primera.cpg.space.raw["{#ID}",shared]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].rawSharedSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Raw space: Total |<p>Raw total space.</p> |DEPENDENT |hpe.primera.cpg.space.raw["{#ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].rawTotalSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: CPG space: Shared |<p>Shared CPG space.</p> |DEPENDENT |hpe.primera.cpg.space["{#ID}",shared]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].sharedSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: State |<p>Overall state of the CPG:</p><p>NORMAL (1) - normal operation;</p><p>DEGRADED (2) - degraded state;</p><p>FAILED (3) - abnormal operation;</p><p>UNKNOWN (99) - unknown state.</p> |DEPENDENT |hpe.primera.cpg.state["{#ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].state.first()`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot administration: Total (raw) |<p>Total physical (raw) logical disk space in snapshot administration.</p> |DEPENDENT |hpe.primera.cpg.space.sa["{#ID}",raw_total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SAUsage.rawTotalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot data: Total (raw) |<p>Total physical (raw) logical disk space in snapshot data space.</p> |DEPENDENT |hpe.primera.cpg.space.sd["{#ID}",raw_total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SDUsage.rawTotalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: User space: Total (raw) |<p>Total physical (raw) logical disk space in user data space.</p> |DEPENDENT |hpe.primera.cpg.space.usr["{#ID}",raw_total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.rawTotalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot administration: Total |<p>Total logical disk space in snapshot administration.</p> |DEPENDENT |hpe.primera.cpg.space.sa["{#ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SAUsage.totalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot data: Total |<p>Total logical disk space in snapshot data space.</p> |DEPENDENT |hpe.primera.cpg.space.sd["{#ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SDUsage.totalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: User space: Total |<p>Total logical disk space in user data space.</p> |DEPENDENT |hpe.primera.cpg.space.usr["{#ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.totalMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: CPG space: Total |<p>Total CPG space.</p> |DEPENDENT |hpe.primera.cpg.space["{#ID}",total]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].totalSpaceMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot administration: Used (raw) |<p>Amount of physical (raw) logical disk used in snapshot administration.</p> |DEPENDENT |hpe.primera.cpg.space.sa["{#ID}",raw_used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SAUsage.rawUsedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot data: Used (raw) |<p>Amount of physical (raw) logical disk used in snapshot data space.</p> |DEPENDENT |hpe.primera.cpg.space.sd["{#ID}",raw_used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SDUsage.rawUsedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: User space: Used (raw) |<p>Amount of physical (raw) logical disk used in user data space.</p> |DEPENDENT |hpe.primera.cpg.space.usr["{#ID}",raw_used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.rawUsedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot administration: Used |<p>Amount of logical disk used in snapshot administration.</p> |DEPENDENT |hpe.primera.cpg.space.sa["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SAUsage.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: Snapshot data: Used |<p>Amount of logical disk used in snapshot data space.</p> |DEPENDENT |hpe.primera.cpg.space.sd["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].SDUsage.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |CPG [{#NAME}]: Logical disk space: User space: Used |<p>Amount of logical disk used in user data space.</p> |DEPENDENT |hpe.primera.cpg.space.usr["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Disk [{#POSITION}]: Firmware version |<p>Physical disk firmware version.</p> |DEPENDENT |hpe.primera.disk["{#ID}",fw_version]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].fwVersion.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#POSITION}]: Free size |<p>Physical disk free size.</p> |DEPENDENT |hpe.primera.disk["{#ID}",free_size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].freeSizeMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Disk [{#POSITION}]: Manufacturer |<p>Physical disk manufacturer.</p> |DEPENDENT |hpe.primera.disk["{#ID}",manufacturer]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].manufacturer.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#POSITION}]: Model |<p>Manufacturer's device ID for disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].model.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#POSITION}]: Path A0 degraded |<p>Indicates if this is a degraded path for the disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",loop_a0_degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].loopA0.degraded.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- BOOL_TO_DECIMAL</p> |
+|HPE |Disk [{#POSITION}]: Path A1 degraded |<p>Indicates if this is a degraded path for the disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",loop_a1_degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].loopA1.degraded.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- BOOL_TO_DECIMAL</p> |
+|HPE |Disk [{#POSITION}]: Path B0 degraded |<p>Indicates if this is a degraded path for the disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",loop_b0_degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].loopB0.degraded.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- BOOL_TO_DECIMAL</p> |
+|HPE |Disk [{#POSITION}]: Path B1 degraded |<p>Indicates if this is a degraded path for the disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",loop_b1_degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].loopB1.degraded.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- BOOL_TO_DECIMAL</p> |
+|HPE |Disk [{#POSITION}]: RPM |<p>RPM of the physical disk.</p> |DEPENDENT |hpe.primera.disk["{#ID}",rpm]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].RPM.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#POSITION}]: Serial number |<p>Disk drive serial number.</p> |DEPENDENT |hpe.primera.disk["{#ID}",serial_number]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].serialNumber.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Disk [{#POSITION}]: State |<p>State of the physical disk:</p><p>Normal (1) - physical disk is in Normal state;</p><p>Degraded (2) - physical disk is not operating normally;</p><p>New (3) - physical disk is new, needs to be admitted;</p><p>Failed (4) - physical disk has failed;</p><p>Unknown (99) - physical disk state is unknown.</p> |DEPENDENT |hpe.primera.disk["{#ID}",state]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].state.first()`</p><p>⛔️ON_FAIL: `CUSTOM_VALUE -> 99`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Disk [{#POSITION}]: Total size |<p>Physical disk total size.</p> |DEPENDENT |hpe.primera.disk["{#ID}",total_size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.disks.members[?(@.id == "{#ID}")].totalSizeMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Host [{#NAME}]: Comment |<p>Additional information for the host.</p> |DEPENDENT |hpe.primera.host["{#ID}",comment]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.comment.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Host [{#NAME}]: Contact |<p>The host's owner and contact.</p> |DEPENDENT |hpe.primera.host["{#ID}",contact]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.contact.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Host [{#NAME}]: IP address |<p>The host's IP address.</p> |DEPENDENT |hpe.primera.host["{#ID}",ipaddress]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.IPAddr.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Host [{#NAME}]: Location |<p>The host's location.</p> |DEPENDENT |hpe.primera.host["{#ID}",location]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.location.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Host [{#NAME}]: Model |<p>The host's model.</p> |DEPENDENT |hpe.primera.host["{#ID}",model]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.model.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Host [{#NAME}]: OS |<p>The operating system running on the host.</p> |DEPENDENT |hpe.primera.host["{#ID}",os]<p>**Preprocessing**:</p><p>- JSONPATH: `$.hosts.members[?(@.id == "{#ID}")].descriptors.os.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1d`</p> |
+|HPE |Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Failover state |<p>The state of the failover operation, shown for the two ports indicated in the N:S:P and Partner columns. The value can be one of the following:</p><p>none (1) - no failover in operation;</p><p>failover_pending (2) - in the process of failing over to partner;</p><p>failed_over (3) - failed over to partner;</p><p>active (4) - the partner port is failed over to this port;</p><p>active_down (5) - the partner port is failed over to this port, but this port is down;</p><p>active_failed (6) - the partner port is failed over to this port, but this port is down;</p><p>failback_pending (7) - in the process of failing back from partner.</p> |DEPENDENT |hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].failoverState.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state |<p>Port link state:</p><p>CONFIG_WAIT (1) - configuration wait;</p><p>ALPA_WAIT (2) - ALPA wait;</p><p>LOGIN_WAIT (3) - login wait;</p><p>READY (4) - link is ready;</p><p>LOSS_SYNC (5) - link is loss sync;</p><p>ERROR_STATE (6) - in error state;</p><p>XXX (7) - xxx;</p><p>NONPARTICIPATE (8) - link did not participate;</p><p>COREDUMP (9) - taking coredump;</p><p>OFFLINE (10) - link is offline;</p><p>FWDEAD (11) - firmware is dead;</p><p>IDLE_FOR_RESET (12) - link is idle for reset;</p><p>DHCP_IN_PROGRESS (13) - DHCP is in progress;</p><p>PENDING_RESET (14) - link reset is pending;</p><p>NEW (15) - link in new. This value is applicable for only virtual ports;</p><p>DISABLED (16) - link in disabled. This value is applicable for only virtual ports;</p><p>DOWN (17) - link in down. This value is applicable for only virtual ports;</p><p>FAILED (18) - link in failed. This value is applicable for only virtual ports;</p><p>PURGING (19) - link in purging. This value is applicable for only virtual ports.</p> |DEPENDENT |hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].linkState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Type |<p>Port connection type:</p><p>HOST (1) - FC port connected to hosts or fabric;</p><p>DISK (2) - FC port connected to disks;</p><p>FREE (3) - port is not connected to hosts or disks;</p><p>IPORT (4) - port is in iport mode;</p><p>RCFC (5) - FC port used for remote copy;</p><p>PEER (6) - FC port used for data migration;</p><p>RCIP (7) - IP (Ethernet) port used for remote copy;</p><p>ISCSI (8) - iSCSI (Ethernet) port connected to hosts;</p><p>CNA (9) - CNA port, which can be FCoE or iSCSI;</p><p>FS (10) - Ethernet File Persona ports.</p> |DEPENDENT |hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].type.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Hardware type |<p>Hardware type:</p><p>FC (1) - Fibre channel HBA;</p><p>ETH (2) - Ethernet NIC;</p><p>iSCSI (3) - iSCSI HBA;</p><p>CNA (4) - Converged network adapter;</p><p>SAS (5) - SAS HBA;</p><p>COMBO (6) - Combo card;</p><p>NVME (7) - NVMe drive;</p><p>UNKNOWN (99) - unknown hardware type.</p> |DEPENDENT |hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",hw_type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].hardwareType.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Task [{#NAME}]: Finish time |<p>Task finish time.</p> |DEPENDENT |hpe.primera.task["{#ID}",finish_time]<p>**Preprocessing**:</p><p>- JSONPATH: `$.tasks[?(@.id == "{#ID}")].finishTime.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>- NOT_MATCHES_REGEX: `^-$`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|HPE |Task [{#NAME}]: Start time |<p>Task start time.</p> |DEPENDENT |hpe.primera.task["{#ID}",start_time]<p>**Preprocessing**:</p><p>- JSONPATH: `$.tasks[?(@.id == "{#ID}")].startTime.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p><p>- JAVASCRIPT: `The text is too long. Please see the template.`</p> |
+|HPE |Task [{#NAME}]: Status |<p>Task status:</p><p>DONE (1) - task is finished;</p><p>ACTIVE (2) - task is in progress;</p><p>CANCELLED (3) - task is canceled;</p><p>FAILED (4) - task failed.</p> |DEPENDENT |hpe.primera.task["{#ID}",status]<p>**Preprocessing**:</p><p>- JSONPATH: `$.tasks[?(@.id == "{#ID}")].status.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p> |
+|HPE |Task [{#NAME}]: Type |<p>Task type:</p><p>VV_COPY (1) - track the physical copy operations;</p><p>PHYS_COPY_RESYNC (2) - track physical copy resynchronization operations;</p><p>MOVE_REGIONS (3) - track region move operations;</p><p>PROMOTE_SV (4) - track virtual-copy promotions;</p><p>REMOTE_COPY_SYNC (5) - track remote copy group synchronizations;</p><p>REMOTE_COPY_REVERSE (6) - track the reversal of a remote copy group;</p><p>REMOTE_COPY_FAILOVER (7) - track the change-over of a secondary volume group to a primary volume group;</p><p>REMOTE_COPY_RECOVER (8) - track synchronization start after a failover operation from original secondary cluster to original primary cluster;</p><p>REMOTE_COPY_RESTORE (9) - tracks the restoration process for groups that have already been recovered;</p><p>COMPACT_CPG (10) - track space consolidation in CPGs;</p><p>COMPACT_IDS (11) - track space consolidation in logical disks;</p><p>SNAPSHOT_ACCOUNTING (12) - track progress of snapshot space usage accounting;</p><p>CHECK_VV (13) - track the progress of the check-volume operation;</p><p>SCHEDULED_TASK (14) - track tasks that have been executed by the system scheduler;</p><p>SYSTEM_TASK (15) - track tasks that are periodically run by the storage system;</p><p>BACKGROUND_TASK (16) - track commands started using the starttask command;</p><p>IMPORT_VV (17) - track tasks that migrate data to the local storage system;</p><p>ONLINE_COPY (18) - track physical copy of the volume while online (createvvcopy-online command);</p><p>CONVERT_VV (19) - track tasks that convert a volume from an FPVV to a TPVV, and the reverse;</p><p>BACKGROUND_COMMAND (20) - track background command tasks;</p><p>CLX_SYNC (21) - track CLX synchronization tasks;</p><p>CLX_RECOVERY (22) - track CLX recovery tasks;</p><p>TUNE_SD (23) - tune copy space;</p><p>TUNE_VV (24) - tune virtual volume;</p><p>TUNE_VV_ROLLBACK (25) - tune virtual volume rollback;</p><p>TUNE_VV_RESTART (26) - tune virtual volume restart;</p><p>SYSTEM_TUNING (27) - system tuning;</p><p>NODE_RESCUE (28) - node rescue;</p><p>REPAIR_SYNC (29) - remote copy repair sync;</p><p>REMOTE_COPY_SWOVER (30) - remote copy switchover;</p><p>DEFRAGMENTATION (31) - defragmentation;</p><p>ENCRYPTION_CHANGE (32) - encryption change;</p><p>REMOTE_COPY_FAILSAFE (33) - remote copy failsafe;</p><p>TUNE_TPVV (34) - tune thin virtual volume;</p><p>REMOTE_COPY_CHG_MODE (35) - remote copy change mode;</p><p>ONLINE_PROMOTE (37) - online promote snap;</p><p>RELOCATE_PD (38) - relocate PD;</p><p>PERIODIC_CSS (39) - remote copy periodic CSS;</p><p>TUNEVV_LARGE (40) - tune large virtual volume;</p><p>SD_META_FIXER (41) - compression SD meta fixer;</p><p>DEDUP_DRYRUN (42) - preview dedup ratio;</p><p>COMPR_DRYRUN (43) - compression estimation;</p><p>DEDUP_COMPR_DRYRUN (44) - compression and dedup estimation;</p><p>UNKNOWN (99) - unknown task type.</p> |DEPENDENT |hpe.primera.task["{#ID}",type]<p>**Preprocessing**:</p><p>- JSONPATH: `$.tasks[?(@.id == "{#ID}")].type.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|HPE |Volume [{#NAME}]: Administrative space: Free |<p>Free administrative space.</p> |DEPENDENT |hpe.primera.volume.space.admin["{#ID}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].adminSpace.freeMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Administrative space: Raw reserved |<p>Raw reserved administrative space.</p> |DEPENDENT |hpe.primera.volume.space.admin["{#ID}",raw_reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].adminSpace.rawReservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Administrative space: Reserved |<p>Reserved administrative space.</p> |DEPENDENT |hpe.primera.volume.space.admin["{#ID}",reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].adminSpace.reservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Administrative space: Used |<p>Used administrative space.</p> |DEPENDENT |hpe.primera.volume.space.admin["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].adminSpace.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Compaction ratio |<p>The compaction ratio indicates the overall amount of storage space saved with thin technology.</p> |DEPENDENT |hpe.primera.volume.capacity.efficiency["{#ID}",compaction]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.compaction.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Compression state |<p>Volume compression state:</p><p>YES (1) - compression is enabled on the volume;</p><p>NO (2) - compression is disabled on the volume;</p><p>OFF (3) - compression is turned off;</p><p>NA (4) - compression is not available on the volume.</p> |DEPENDENT |hpe.primera.volume.state["{#ID}",compression]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].compressionState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|HPE |Volume [{#NAME}]: Deduplication state |<p>Volume deduplication state:</p><p>YES (1) - enables deduplication on the volume;</p><p>NO (2) - disables deduplication on the volume;</p><p>NA (3) - deduplication is not available;</p><p>OFF (4) - deduplication is turned off.</p> |DEPENDENT |hpe.primera.volume.state["{#ID}",deduplication]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].deduplicationState.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `6h`</p> |
+|HPE |Volume [{#NAME}]: Degraded state |<p>Volume detailed state:</p><p>LDS_NOT_STARTED (1) - LDs not started.</p><p>NOT_STARTED (2) - VV not started.</p><p>NEEDS_CHECK (3) - check for consistency.</p><p>NEEDS_MAINT_CHECK (4) - maintenance check is required.</p><p>INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.</p><p>SNAPDATA_INVALID (6) - invalid snapshot data.</p><p>PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.</p><p>STALE (8) - parts of the VV contain old data because of a copy-on-write operation.</p><p>COPY_FAILED (9) - a promote or copy operation to this volume failed.</p><p>DEGRADED_AVAIL (10) - degraded due to availability.</p><p>DEGRADED_PERF (11) - degraded due to performance.</p><p>PROMOTING (12) - volume is the current target of a promote operation.</p><p>COPY_TARGET (13) - volume is the current target of a physical copy operation.</p><p>RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.</p><p>TUNING (15) - volume tuning is in progress.</p><p>CLOSING (16) - volume is closing.</p><p>REMOVING (17) - removing the volume.</p><p>REMOVING_RETRY (18) - retrying a volume removal operation.</p><p>CREATING (19) - creating a volume.</p><p>COPY_SOURCE (20) - copy source.</p><p>IMPORTING (21) - importing a volume.</p><p>CONVERTING (22) - converting a volume.</p><p>INVALID (23) - invalid.</p><p>EXCLUSIVE (24) - local storage system has exclusive access to the volume.</p><p>CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.</p><p>STANDBY (26) - volume in standby mode.</p><p>SD_META_INCONSISTENT (27) - SD Meta Inconsistent.</p><p>SD_NEEDS_FIX (28) - SD needs fix.</p><p>SD_META_FIXING (29) - SD meta fix.</p><p>UNKNOWN (999) - unknown state.</p><p>NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.</p> |DEPENDENT |hpe.primera.volume.state["{#ID}",degraded]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].degradedStates.first()`</p> |
+|HPE |Volume [{#NAME}]: Failed state |<p>Volume detailed state:</p><p>LDS_NOT_STARTED (1) - LDs not started.</p><p>NOT_STARTED (2) - VV not started.</p><p>NEEDS_CHECK (3) - check for consistency.</p><p>NEEDS_MAINT_CHECK (4) - maintenance check is required.</p><p>INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.</p><p>SNAPDATA_INVALID (6) - invalid snapshot data.</p><p>PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.</p><p>STALE (8) - parts of the VV contain old data because of a copy-on-write operation.</p><p>COPY_FAILED (9) - a promote or copy operation to this volume failed.</p><p>DEGRADED_AVAIL (10) - degraded due to availability.</p><p>DEGRADED_PERF (11) - degraded due to performance.</p><p>PROMOTING (12) - volume is the current target of a promote operation.</p><p>COPY_TARGET (13) - volume is the current target of a physical copy operation.</p><p>RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.</p><p>TUNING (15) - volume tuning is in progress.</p><p>CLOSING (16) - volume is closing.</p><p>REMOVING (17) - removing the volume.</p><p>REMOVING_RETRY (18) - retrying a volume removal operation.</p><p>CREATING (19) - creating a volume.</p><p>COPY_SOURCE (20) - copy source.</p><p>IMPORTING (21) - importing a volume.</p><p>CONVERTING (22) - converting a volume.</p><p>INVALID (23) - invalid.</p><p>EXCLUSIVE (24) - local storage system has exclusive access to the volume.</p><p>CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.</p><p>STANDBY (26) - volume in standby mode.</p><p>SD_META_INCONSISTENT (27) - SD Meta Inconsistent.</p><p>SD_NEEDS_FIX (28) - SD needs fix.</p><p>SD_META_FIXING (29) - SD meta fix.</p><p>UNKNOWN (999) - unknown state.</p><p>NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.</p> |DEPENDENT |hpe.primera.volume.state["{#ID}",failed]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].failedStates.first()`</p><p>- JAVASCRIPT: `return JSON.stringify(JSON.parse(value));`</p> |
+|HPE |Volume [{#NAME}]: Overprovisioning ratio |<p>Overprovisioning capacity efficiency ratio.</p> |DEPENDENT |hpe.primera.volume.capacity.efficiency["{#ID}",overprovisioning]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.overProvisioning.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Remote copy status |<p>Remote copy status of the volume:</p><p>NONE (1) - volume is not associated with remote copy;</p><p>PRIMARY (2) - volume is the primary copy;</p><p>SECONDARY (3) - volume is the secondary copy;</p><p>SNAP (4) - volume is the remote copy snapshot;</p><p>SYNC (5) - volume is a remote copy snapshot being used for synchronization;</p><p>DELETE (6) - volume is a remote copy snapshot that is marked for deletion;</p><p>UNKNOWN (99) - remote copy status is unknown for this volume.</p> |DEPENDENT |hpe.primera.volume.status["{#ID}",rcopy]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].rcopyStatus.first()`</p> |
+|HPE |Volume [{#NAME}]: Snapshot space: Free |<p>Free snapshot space.</p> |DEPENDENT |hpe.primera.volume.space.snapshot["{#ID}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.freeMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Snapshot space: Raw reserved |<p>Raw reserved snapshot space.</p> |DEPENDENT |hpe.primera.volume.space.snapshot["{#ID}",raw_reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.rawReservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Snapshot space: Reserved |<p>Reserved snapshot space.</p> |DEPENDENT |hpe.primera.volume.space.snapshot["{#ID}",reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.reservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Snapshot space: Used |<p>Used snapshot space.</p> |DEPENDENT |hpe.primera.volume.space.snapshot["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: State |<p>State of the volume:</p><p>NORMAL (1) - normal operation;</p><p>DEGRADED (2) - degraded state;</p><p>FAILED (3) - abnormal operation;</p><p>UNKNOWN (99) - unknown state.</p> |DEPENDENT |hpe.primera.volume.state["{#ID}"]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].state.first()`</p> |
+|HPE |Volume [{#NAME}]: Storage space saved using compression |<p>Indicates the amount of storage space saved using compression.</p> |DEPENDENT |hpe.primera.volume.capacity.efficiency["{#ID}",compression]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.compression.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Storage space saved using deduplication |<p>Indicates the amount of storage space saved using deduplication.</p> |DEPENDENT |hpe.primera.volume.capacity.efficiency["{#ID}",deduplication]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.deduplication.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Storage space saved using deduplication and compression |<p>Indicates the amount of storage space saved using deduplication and compression together.</p> |DEPENDENT |hpe.primera.volume.capacity.efficiency["{#ID}",reduction]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.dataReduction.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `1h`</p> |
+|HPE |Volume [{#NAME}]: Total reserved space |<p>Total reserved space.</p> |DEPENDENT |hpe.primera.volume.space.total["{#ID}",reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].totalReservedMiB.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Total space |<p>Virtual size of volume.</p> |DEPENDENT |hpe.primera.volume.space.total["{#ID}",size]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].sizeMiB.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: Total used space |<p>Total used space. Sum of used user space and used snapshot space.</p> |DEPENDENT |hpe.primera.volume.space.total["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].totalUsedMiB.first()`</p><p>⛔️ON_FAIL: `DISCARD_VALUE -> `</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: User space: Free |<p>Free user space.</p> |DEPENDENT |hpe.primera.volume.space.user["{#ID}",free]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].userSpace.freeMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: User space: Raw reserved |<p>Raw reserved user space.</p> |DEPENDENT |hpe.primera.volume.space.user["{#ID}",raw_reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].userSpace.rawReservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: User space: Reserved |<p>Reserved user space.</p> |DEPENDENT |hpe.primera.volume.space.user["{#ID}",reserved]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].userSpace.reservedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `12h`</p><p>- MULTIPLIER: `1048576`</p> |
+|HPE |Volume [{#NAME}]: User space: Used |<p>Used user space.</p> |DEPENDENT |hpe.primera.volume.space.user["{#ID}",used]<p>**Preprocessing**:</p><p>- JSONPATH: `$.volumes.members[?(@.id == "{#ID}")].userSpace.usedMiB.first()`</p><p>- DISCARD_UNCHANGED_HEARTBEAT: `10m`</p><p>- MULTIPLIER: `1048576`</p> |
+
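+Each metric above is a dependent item: it extracts a single member from the master item's JSON by `{#ID}` and, for the space metrics, converts MiB to bytes with the `1048576` multiplier. A rough illustration of what that preprocessing chain does (the payload and values below are made up):
+
+```js
+// Illustrative only: the effect of the JSONPATH + MULTIPLIER preprocessing steps.
+var masterValue = '{"volumes":{"total":1,"members":[{"id":42,"name":"vol01","sizeMiB":1024}]}}';
+// JSONPATH:   $.volumes.members[?(@.id == "42")].sizeMiB.first()  ->  1024
+// MULTIPLIER: 1048576                                             ->  1073741824 (bytes)
+var vol = JSON.parse(masterValue).volumes.members[0];
+var bytes = vol.sizeMiB * 1048576;   // 1073741824
+```
+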
+## Triggers
+
+|Name|Description|Expression|Severity|Dependencies and additional info|
+|----|-----------|----|----|----|
+|HPE Primera: There are errors in requests to WSAPI |<p>Zabbix has received errors in requests to WSAPI.</p> |`length(last(/HPE Primera by HTTP/hpe.primera.data.errors))>0` |AVERAGE |<p>**Depends on**:</p><p>- HPE Primera: Service is unavailable</p> |
+|HPE Primera: Service is unavailable |<p>-</p> |`max(/HPE Primera by HTTP/net.tcp.service["{$HPE.PRIMERA.API.SCHEME}","{HOST.CONN}","{$HPE.PRIMERA.API.PORT}"],5m)=0` |HIGH |<p>Manual close: YES</p> |
+|CPG [{#NAME}]: Degraded |<p>CPG [{#NAME}] is in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.cpg.state["{#ID}"])=2` |AVERAGE | |
+|CPG [{#NAME}]: Failed |<p>CPG [{#NAME}] is in failed state.</p> |`last(/HPE Primera by HTTP/hpe.primera.cpg.state["{#ID}"])=3` |HIGH | |
+|Disk [{#POSITION}]: Path A0 degraded |<p>Disk [{#POSITION}] path A0 in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_a0_degraded])=1` |AVERAGE | |
+|Disk [{#POSITION}]: Path A1 degraded |<p>Disk [{#POSITION}] path A1 in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_a1_degraded])=1` |AVERAGE | |
+|Disk [{#POSITION}]: Path B0 degraded |<p>Disk [{#POSITION}] path B0 in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_b0_degraded])=1` |AVERAGE | |
+|Disk [{#POSITION}]: Path B1 degraded |<p>Disk [{#POSITION}] path B1 in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_b1_degraded])=1` |AVERAGE | |
+|Disk [{#POSITION}]: Degraded |<p>Disk [{#POSITION}] in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=2` |AVERAGE | |
+|Disk [{#POSITION}]: Failed |<p>Disk [{#POSITION}] in failed state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=3` |HIGH | |
+|Disk [{#POSITION}]: Unknown issue |<p>Disk [{#POSITION}] in unknown state.</p> |`last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=99` |INFO | |
+|Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Failover state is {ITEM.VALUE1} |<p>Port [{#NODE}:{#SLOT}:{#CARD.PORT}] has failover error.</p> |`last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state])<>1 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state])<>4` |AVERAGE | |
+|Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state is {ITEM.VALUE1} |<p>Port [{#NODE}:{#SLOT}:{#CARD.PORT}] not in ready state.</p> |`last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>4 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>1 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>3 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>13 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>15 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>16` |HIGH | |
+|Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state is {ITEM.VALUE1} |<p>Port [{#NODE}:{#SLOT}:{#CARD.PORT}] not in ready state.</p> |`last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=1 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=3 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=13 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=15 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=16` |AVERAGE | |
+|Task [{#NAME}]: Cancelled |<p>Task [{#NAME}] is cancelled.</p> |`last(/HPE Primera by HTTP/hpe.primera.task["{#ID}",status])=3` |INFO | |
+|Task [{#NAME}]: Failed |<p>Task [{#NAME}] is failed.</p> |`last(/HPE Primera by HTTP/hpe.primera.task["{#ID}",status])=4` |AVERAGE | |
+|Volume [{#NAME}]: Degraded |<p>Volume [{#NAME}] is in degraded state.</p> |`last(/HPE Primera by HTTP/hpe.primera.volume.state["{#ID}"])=2` |AVERAGE | |
+|Volume [{#NAME}]: Failed |<p>Volume [{#NAME}] is in failed state.</p> |`last(/HPE Primera by HTTP/hpe.primera.volume.state["{#ID}"])=3` |HIGH | |
+
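+The items and triggers above are all driven by a single "HPE Primera: Get data" script item, which talks to WSAPI using a session key. A minimal sketch of that flow in Zabbix JavaScript (using the built-in `HttpRequest` object; the address and credentials below are placeholders):
+
+```js
+// Sketch only: the session-key flow used by the "Get data" script item.
+var base = 'https://primera.example.com/api/v1/';   // hypothetical WSAPI address
+var req = new HttpRequest();
+req.addHeader('Content-Type: application/json');
+
+// 1. Obtain a session key.
+var resp = req.post(base + 'credentials', JSON.stringify({user: 'zabbix', password: '***'}));
+if (req.getStatus() < 200 || req.getStatus() >= 300) {
+    throw 'Auth request failed with status code ' + req.getStatus();
+}
+var key = JSON.parse(resp).key;
+
+// 2. Request a collection with the session key header.
+req = new HttpRequest();
+req.addHeader('X-HP3PAR-WSAPI-SessionKey: ' + key);
+var system = JSON.parse(req.get(base + 'system'));
+
+// 3. Always release the session when finished.
+(new HttpRequest()).delete(base + 'credentials/' + key);
+```
+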
+## Feedback
+
+Please report any issues with the template at https://support.zabbix.com
+
+You can also provide feedback, discuss the template or ask for help with it at [ZABBIX forums](https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/).
+
diff --git a/templates/san/hpe_primera_http/template_san_hpe_primera_http.yaml b/templates/san/hpe_primera_http/template_san_hpe_primera_http.yaml
new file mode 100644
index 00000000000..7b92c4e1dd5
--- /dev/null
+++ b/templates/san/hpe_primera_http/template_san_hpe_primera_http.yaml
@@ -0,0 +1,4681 @@
+zabbix_export:
+ version: '6.0'
+ date: '2022-06-01T08:17:46Z'
+ groups:
+ -
+ uuid: 7c2cb727f85b492d88cd56e17127c64d
+ name: Templates/SAN
+ templates:
+ -
+ uuid: b8750c02b5624c6889979b129735bd56
+ template: 'HPE Primera by HTTP'
+ name: 'HPE Primera by HTTP'
+ description: |
+ The template to monitor HPE Primera by HTTP.
+ It works without any external scripts and uses the script item.
+
+ Setup:
+ 1. Create user zabbix on the storage with browse role and enable it for all domains.
+ 2. The WSAPI server does not start automatically.
+ - Log in to the CLI as Super, Service, or any role granted the wsapi_set right.
+ - Start the WSAPI server by command: `startwsapi`.
+ - To check WSAPI state use command: `showwsapi`.
+ 3. Link template to the host.
+        4. Configure macros {$HPE.PRIMERA.API.USERNAME} and {$HPE.PRIMERA.API.PASSWORD}.
+
+ You can discuss this template or leave feedback on our forum https://www.zabbix.com/forum/zabbix-suggestions-and-feedback/
+
+ Template tooling version used: 0.41
+ groups:
+ -
+ name: Templates/SAN
+ items:
+ -
+ uuid: 484a6b9568234bbca9b4bcae2833bbf1
+ name: 'HPE Primera: Get errors'
+ type: DEPENDENT
+ key: hpe.primera.data.errors
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: TEXT
+ description: 'A list of errors from WSAPI requests.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.errors
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: raw
+ triggers:
+ -
+ uuid: 570d440e7ec9445585003208eca06e63
+ expression: 'length(last(/HPE Primera by HTTP/hpe.primera.data.errors))>0'
+ name: 'HPE Primera: There are errors in requests to WSAPI'
+ opdata: '{ITEM.LASTVALUE1}'
+ priority: AVERAGE
+ description: 'Zabbix has received errors in requests to WSAPI.'
+ dependencies:
+ -
+ name: 'HPE Primera: Service is unavailable'
+ expression: 'max(/HPE Primera by HTTP/net.tcp.service["{$HPE.PRIMERA.API.SCHEME}","{HOST.CONN}","{$HPE.PRIMERA.API.PORT}"],5m)=0'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 530e20083da8423e9d30c8342f1b7da3
+ name: 'HPE Primera: Get data'
+ type: SCRIPT
+ key: hpe.primera.data.get
+ history: 0d
+ trends: '0'
+ value_type: TEXT
+ params: |
+ var Primera = {
+ params: {},
+ session_key: null,
+
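+            // Validate that username, password and base_url are set; make sure base_url ends with '/'.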
+ setParams: function (params) {
+ ['username', 'password', 'base_url'].forEach(function (field) {
+ if (typeof params !== 'object' || typeof params[field] === 'undefined' || params[field] === '') {
+ throw 'Required param is not set: ' + field + '.';
+ }
+ });
+
+ Primera.params = params;
+ if (typeof Primera.params.base_url === 'string' && !Primera.params.base_url.endsWith('/')) {
+ Primera.params.base_url += '/';
+ }
+ },
+
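+            // POST the credentials to api/v1/credentials and cache the returned session key.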
+ login: function () {
+ if (Primera.session_key !== null) {
+ return;
+ }
+
+ var response, request = new HttpRequest();
+ request.addHeader('Content-Type: application/json');
+
+ response = request.post(Primera.params.base_url + 'api/v1/credentials', JSON.stringify({
+ 'user': Primera.params.username,
+ 'password': Primera.params.password
+ }));
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+ throw 'Auth request failed with status code ' + request.getStatus() + ': ' + response;
+ }
+
+ if (response !== null) {
+ try {
+ auth_data = JSON.parse(response);
+ }
+ catch (error) {
+ throw 'Failed to parse auth response received from device API.';
+ }
+ }
+ else {
+ throw 'No data received by auth request.'
+ }
+
+ if ('key' in auth_data) {
+ Primera.session_key = auth_data['key'];
+ } else {
+ throw 'Auth response does not contain session key.'
+ }
+ },
+
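+            // Release the cached WSAPI session key, if one was obtained.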
+ logout: function () {
+ if (Primera.session_key !== null) {
+ (new HttpRequest()).delete(Primera.params.base_url + 'api/v1/credentials/' + Primera.session_key);
+ }
+ },
+
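+            // GET api/v1/<method> using the session key header and return the parsed JSON.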
+ requestData: function (method) {
+ if (Primera.session_key === null) {
+ return;
+ }
+
+ request = new HttpRequest();
+ request.addHeader('X-HP3PAR-WSAPI-SessionKey: ' + Primera.session_key);
+
+ raw_data = request.get(Primera.params.base_url + 'api/v1/' + method);
+
+ if (request.getStatus() < 200 || request.getStatus() >= 300) {
+            throw 'Request failed with status code ' + request.getStatus() + ': ' + raw_data;
+ }
+
+ if (raw_data !== null) {
+ try {
+ return JSON.parse(raw_data);
+ }
+ catch (error) {
+ throw 'Failed to parse response received from device API.';
+ }
+        }
+        else {
+ throw 'No data received by ' + method + ' request.';
+ }
+ }
+ };
+
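+        // Poll each WSAPI collection; tasks are de-duplicated by name and per-collection errors are gathered in data.errors.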
+ var methods = ['disks', 'cpgs', 'hosts', 'ports', 'system', 'tasks', 'volumes'],
+ data = {};
+
+ data['errors'] = {};
+
+ try {
+ Primera.setParams(JSON.parse(value));
+
+ try {
+ Primera.login();
+ }
+ catch (error) {
+ data.errors.auth = error.toString();
+ }
+
+ if (!('auth' in data.errors)) {
+ for (var i in methods) {
+ try {
+ if (methods[i] === 'tasks') {
+ var result = [],
+ tmp_tasks = {};
+
+ tasks = Primera.requestData(methods[i]);
+
+ tasks.members.forEach(function (task) {
+ tmp_tasks[task.name] = task;
+ });
+
+ for (var task in tmp_tasks) {
+ result.push(tmp_tasks[task]);
+ }
+
+ data[methods[i]] = result;
+ }
+ else {
+ data[methods[i]] = Primera.requestData(methods[i]);
+ }
+ }
+ catch (error) {
+ data.errors[methods[i]] = error.toString();
+ }
+ }
+ }
+ }
+ catch (error) {
+ data.errors.params = error.toString();
+ }
+
+ try {
+ Primera.logout();
+ }
+ catch (error) {
+ }
+
+ if (Object.keys(data.errors).length !== 0) {
+ errors = 'Failed to receive data:';
+ for (var error in data.errors) {
+ errors += '\n' + error + ' : ' + data.errors[error];
+ }
+ data.errors = errors;
+ }
+ else {
+ data.errors = '';
+ }
+
+ return JSON.stringify(data);
+ description: 'The JSON with result of WSAPI requests.'
+ timeout: '{$HPE.PRIMERA.DATA.TIMEOUT}'
+ parameters:
+ -
+ name: base_url
+ value: '{$HPE.PRIMERA.API.SCHEME}://{HOST.CONN}'
+ -
+ name: password
+ value: '{$HPE.PRIMERA.API.PASSWORD}'
+ -
+ name: username
+ value: '{$HPE.PRIMERA.API.USERNAME}'
+ tags:
+ -
+ tag: component
+ value: raw
+ -
+ uuid: d5b8a74991d34652973a78d58203d5fd
+ name: 'HPE Primera: Disks total'
+ type: DEPENDENT
+ key: hpe.primera.disks.total
+ delay: '0'
+ history: 7d
+ description: 'Number of physical disks.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.disks.total
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ uuid: efc450d0682c4c5d93df41d05c10eceb
+ name: 'HPE Primera: Capacity allocated'
+ type: DEPENDENT
+ key: hpe.primera.system.capacity.allocated
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Allocated capacity in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.allocatedCapacityMiB
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: system
+ -
+ uuid: e3842eec2e45443681670d3c1d194900
+ name: 'HPE Primera: Capacity failed'
+ type: DEPENDENT
+ key: hpe.primera.system.capacity.failed
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Failed capacity in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.failedCapacityMiB
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: system
+ -
+ uuid: d23e888299d344238468481689f55e2d
+ name: 'HPE Primera: Capacity free'
+ type: DEPENDENT
+ key: hpe.primera.system.capacity.free
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Free capacity in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.freeCapacityMiB
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: system
+ -
+ uuid: 1e6fc0d68d18474e84b4fe2e4d3374d1
+ name: 'HPE Primera: Capacity total'
+ type: DEPENDENT
+ key: hpe.primera.system.capacity.total
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total capacity in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.totalCapacityMiB
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: system
+ -
+ uuid: 65bcf3fb456a45358795d2f9d8249e16
+ name: 'HPE Primera: Chunklet size'
+ type: DEPENDENT
+ key: hpe.primera.system.chunklet.size
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Chunklet size.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.chunkletSizeMiB
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: system
+ -
+ uuid: dd6fd61256cc4eeeb94f50d0c86fc51f
+ name: 'HPE Primera: System contact'
+ type: DEPENDENT
+ key: hpe.primera.system.contact
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Contact of the system.'
+ inventory_link: CONTACT
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.contact
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: dd61f3a680284893801c96bdbd445645
+ name: 'HPE Primera: System location'
+ type: DEPENDENT
+ key: hpe.primera.system.location
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Location of the system.'
+ inventory_link: LOCATION
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.location
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 5f28ec66be1f43208139476af3653997
+ name: 'HPE Primera: Model'
+ type: DEPENDENT
+ key: hpe.primera.system.model
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'System model.'
+ inventory_link: MODEL
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.model
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 45281453bf204365a8a8ac2ba7255e54
+ name: 'HPE Primera: System name'
+ type: DEPENDENT
+ key: hpe.primera.system.name
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'System name.'
+ inventory_link: NAME
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.name
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: acf6d37022884dc99a3b55c95f6b19c8
+ name: 'HPE Primera: Nodes online'
+ type: DEPENDENT
+ key: hpe.primera.system.nodes.online
+ delay: '0'
+ history: 7d
+ description: 'Number of online nodes in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.onlineNodes.length()
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: 65b22e04d7334aaf970a8961a46c22c9
+ name: 'HPE Primera: Nodes total'
+ type: DEPENDENT
+ key: hpe.primera.system.nodes.total
+ delay: '0'
+ history: 7d
+ description: 'Total number of nodes in the system.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.totalNodes
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: d194672ea7f64dd58296d7fb2537f35b
+ name: 'HPE Primera: Serial number'
+ type: DEPENDENT
+ key: hpe.primera.system.serial_number
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'System serial number.'
+ inventory_link: SERIALNO_A
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.serialNumber
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: e0f0ff7657784c8eab1a71a68ceefc19
+ name: 'HPE Primera: Software version number'
+ type: DEPENDENT
+ key: hpe.primera.system.sw_version
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Storage system software version number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.system.systemVersion
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: system
+ -
+ uuid: a0b4fdee38a64c5f82fd051ea74a7b2d
+ name: 'HPE Primera: Service ping'
+ type: SIMPLE
+ key: 'net.tcp.service["{$HPE.PRIMERA.API.SCHEME}","{HOST.CONN}","{$HPE.PRIMERA.API.PORT}"]'
+ history: 7d
+ description: 'Checks if the service is running and accepting TCP connections.'
+ valuemap:
+ name: 'Service state'
+ preprocessing:
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ tags:
+ -
+ tag: component
+ value: health
+ -
+ tag: component
+ value: network
+ triggers:
+ -
+ uuid: 8e7aa46322c643878e509461dbb9169d
+ expression: 'max(/HPE Primera by HTTP/net.tcp.service["{$HPE.PRIMERA.API.SCHEME}","{HOST.CONN}","{$HPE.PRIMERA.API.PORT}"],5m)=0'
+ name: 'HPE Primera: Service is unavailable'
+ priority: HIGH
+ manual_close: 'YES'
+ tags:
+ -
+ tag: scope
+ value: availability
+ discovery_rules:
+ -
+ uuid: b9132b095eb349c99e868ea40364596d
+ name: 'Common provisioning groups discovery'
+ type: DEPENDENT
+ key: hpe.primera.cpg.discovery
+ delay: '0'
+ filter:
+ evaltype: AND
+ conditions:
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.CPG.NAME.MATCHES}'
+ formulaid: A
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.CPG.NAME.NOT_MATCHES}'
+ operator: NOT_MATCHES_REGEX
+ formulaid: B
+ description: 'List of CPGs resources.'
+ item_prototypes:
+ -
+ uuid: 6d070a747a01498b94c56da721a63192
+ name: 'CPG [{#NAME}]: Number of FPVVs'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.fpvv["{#ID}",count]'
+ delay: '0'
+ history: 7d
+ description: 'Number of FPVVs (Fully Provisioned Virtual Volumes) allocated in the CPG.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].numFPVVs.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: a7fccd5afcf5469ca11a9436240eab5c
+ name: 'CPG [{#NAME}]: Raw space: Free'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.raw["{#ID}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw free space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].rawFreeSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 8f26a54327f54e968f422081e6045217
+ name: 'CPG [{#NAME}]: Raw space: Shared'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.raw["{#ID}",shared]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw shared space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].rawSharedSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: e3ac07e2707a44fd8c166f4618fd79a1
+ name: 'CPG [{#NAME}]: Raw space: Total'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.raw["{#ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw total space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].rawTotalSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 6dfd722ad85b481a9c2b04a4a5eb91fe
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot administration: Total (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sa["{#ID}",raw_total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total physical (raw) logical disk space in snapshot administration.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SAUsage.rawTotalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 8b2dc75fdcfa48908ece97768641f055
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot administration: Used (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sa["{#ID}",raw_used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of physical (raw) logical disk used in snapshot administration.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SAUsage.rawUsedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: d55f0eab811641fdbb9a8bc8c54815ee
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot administration: Total'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sa["{#ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total logical disk space in snapshot administration.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SAUsage.totalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: cf467bc7d9ac45259f284eeab6ae7f6a
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot administration: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sa["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of logical disk used in snapshot administration.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SAUsage.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 61dd9aa18c714863b606d18b2fff6c57
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot data: Total (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sd["{#ID}",raw_total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total physical (raw) logical disk space in snapshot data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SDUsage.rawTotalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: f95ee3c4c0c64d46a47dd68b346f2fa5
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot data: Used (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sd["{#ID}",raw_used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of physical (raw) logical disk used in snapshot data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SDUsage.rawUsedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 797f3335d8704d4bb8b53e34b3e6589e
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot data: Total'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sd["{#ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total logical disk space in snapshot data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SDUsage.totalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 51e40f6a1eb249d58bd79948d403d4f7
+ name: 'CPG [{#NAME}]: Logical disk space: Snapshot data: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.sd["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of logical disk used in snapshot data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].SDUsage.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: b7a8880bdafe4f0da4dd8cee6d4fdfa4
+ name: 'CPG [{#NAME}]: Logical disk space: User space: Total (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.usr["{#ID}",raw_total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total physical (raw) logical disk space in user data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.rawTotalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 1feabd57f12a48b98dab098435179725
+ name: 'CPG [{#NAME}]: Logical disk space: User space: Used (raw)'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.usr["{#ID}",raw_used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of physical (raw) logical disk used in user data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.rawUsedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 6f85014bc639420aa409d97d42cb75b2
+ name: 'CPG [{#NAME}]: Logical disk space: User space: Total'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.usr["{#ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total logical disk space in user data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.totalMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: c21f77a45ab443099bf957fbb39478f3
+ name: 'CPG [{#NAME}]: Logical disk space: User space: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space.usr["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Amount of logical disk used in user data space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].UsrUsage.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: a6cd977f27a8463cb385715327e34955
+ name: 'CPG [{#NAME}]: CPG space: Free'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space["{#ID}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Free CPG space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].freeSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 17cf1cddafd444f8a5616a472c1a019b
+ name: 'CPG [{#NAME}]: CPG space: Shared'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space["{#ID}",shared]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Shared CPG space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].sharedSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 3950d779a0394615b8ec311525ed4168
+ name: 'CPG [{#NAME}]: CPG space: Total'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.space["{#ID}",total]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total CPG space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].totalSpaceMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: a7e2188d600a4715a58deba46f3b46ac
+ name: 'CPG [{#NAME}]: Degraded state'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.state["{#ID}",degraded]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: |
+ Detailed state of the CPG:
+
+ LDS_NOT_STARTED (1) - LDs not started.
+ NOT_STARTED (2) - VV not started.
+ NEEDS_CHECK (3) - check for consistency.
+ NEEDS_MAINT_CHECK (4) - maintenance check is required.
+ INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.
+ SNAPDATA_INVALID (6) - invalid snapshot data.
+ PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.
+ STALE (8) - parts of the VV contain old data because of a copy-on-write operation.
+ COPY_FAILED (9) - a promote or copy operation to this volume failed.
+ DEGRADED_AVAIL (10) - degraded due to availability.
+ DEGRADED_PERF (11) - degraded due to performance.
+ PROMOTING (12) - volume is the current target of a promote operation.
+ COPY_TARGET (13) - volume is the current target of a physical copy operation.
+ RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.
+ TUNING (15) - volume tuning is in progress.
+ CLOSING (16) - volume is closing.
+ REMOVING (17) - removing the volume.
+ REMOVING_RETRY (18) - retrying a volume removal operation.
+ CREATING (19) - creating a volume.
+ COPY_SOURCE (20) - copy source.
+ IMPORTING (21) - importing a volume.
+ CONVERTING (22) - converting a volume.
+ INVALID (23) - invalid.
+ EXCLUSIVE (24) - local storage system has exclusive access to the volume.
+ CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.
+ STANDBY (26) - volume in standby mode.
+ SD_META_INCONSISTENT (27) - SD Meta Inconsistent.
+ SD_NEEDS_FIX (28) - SD needs fix.
+ SD_META_FIXING (29) - SD meta fix.
+ UNKNOWN (999) - unknown state.
+ NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.
+ valuemap:
+ name: 'Volume detailed state enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].degradedStates.first()'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 837b48053400487885bf051a78f2200a
+ name: 'CPG [{#NAME}]: Failed state'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.state["{#ID}",failed]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: |
+ Detailed state of the CPG:
+
+ LDS_NOT_STARTED (1) - LDs not started.
+ NOT_STARTED (2) - VV not started.
+ NEEDS_CHECK (3) - check for consistency.
+ NEEDS_MAINT_CHECK (4) - maintenance check is required.
+ INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.
+ SNAPDATA_INVALID (6) - invalid snapshot data.
+ PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.
+ STALE (8) - parts of the VV contain old data because of a copy-on-write operation.
+ COPY_FAILED (9) - a promote or copy operation to this volume failed.
+ DEGRADED_AVAIL (10) - degraded due to availability.
+ DEGRADED_PERF (11) - degraded due to performance.
+ PROMOTING (12) - volume is the current target of a promote operation.
+ COPY_TARGET (13) - volume is the current target of a physical copy operation.
+ RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.
+ TUNING (15) - volume tuning is in progress.
+ CLOSING (16) - volume is closing.
+ REMOVING (17) - removing the volume.
+ REMOVING_RETRY (18) - retrying a volume removal operation.
+ CREATING (19) - creating a volume.
+ COPY_SOURCE (20) - copy source.
+ IMPORTING (21) - importing a volume.
+ CONVERTING (22) - converting a volume.
+ INVALID (23) - invalid.
+ EXCLUSIVE (24) - local storage system has exclusive access to the volume.
+ CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.
+ STANDBY (26) - volume in standby mode.
+ SD_META_INCONSISTENT (27) - SD Meta Inconsistent.
+ SD_NEEDS_FIX (28) - SD needs fix.
+ SD_META_FIXING (29) - SD meta fix.
+ UNKNOWN (999) - unknown state.
+ NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.
+ valuemap:
+ name: 'Volume detailed state enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].failedStates.first()'
+ -
+ type: JAVASCRIPT
+ parameters:
+ - 'return JSON.stringify(JSON.parse(value));'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: 3fe9b7c875c248e3b09c98162e30ebf8
+ name: 'CPG [{#NAME}]: State'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.state["{#ID}"]'
+ delay: '0'
+ history: 7d
+ description: |
+ Overall state of the CPG:
+
+ NORMAL (1) - normal operation;
+ DEGRADED (2) - degraded state;
+ FAILED (3) - abnormal operation;
+ UNKNOWN (99) - unknown state.
+ valuemap:
+ name: 'State enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].state.first()'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 32a29b7a4bf340ef8ab07a8db3bef309
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.cpg.state["{#ID}"])=2'
+ name: 'CPG [{#NAME}]: Degraded'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: AVERAGE
+ description: 'CPG [{#NAME}] is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 85c26e64c8074e8b9ab52f20394afeee
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.cpg.state["{#ID}"])=3'
+ name: 'CPG [{#NAME}]: Failed'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: HIGH
+ description: 'CPG [{#NAME}] is in failed state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 18544a7742af4678bd8c37ad84a8d137
+ name: 'CPG [{#NAME}]: Number of TDVVs'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.tdvv["{#ID}",count]'
+ delay: '0'
+ history: 7d
+          description: 'Number of TDVVs (Thinly Deduplicated Virtual Volumes) created in the CPG.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].numTDVVs.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ -
+ uuid: f93dc70fa63a47da9253a447a67df685
+ name: 'CPG [{#NAME}]: Number of TPVVs'
+ type: DEPENDENT
+ key: 'hpe.primera.cpg.tpvv["{#ID}",count]'
+ delay: '0'
+ history: 7d
+ description: 'Number of TPVVs (Thinly Provisioned Virtual Volumes) allocated in the CPG.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.cpgs.members[?(@.id == "{#ID}")].numTPVVs.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: cpg
+ -
+ tag: component
+ value: storage
+ -
+ tag: cpg
+ value: '{#NAME}'
+ graph_prototypes:
+ -
+ uuid: c5d1e864f752465eae8822c06d635aeb
+ name: 'CPG [{#NAME}]: CPG space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space["{#ID}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space["{#ID}",shared]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space["{#ID}",total]'
+ -
+ uuid: 1ef5d5b0090c4f168da6f842972af688
+ name: 'CPG [{#NAME}]: Number of virtual volumes'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.fpvv["{#ID}",count]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.tpvv["{#ID}",count]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.tdvv["{#ID}",count]'
+ -
+ uuid: 4c15694d488f42d6bc5b9caf4fe9e049
+ name: 'CPG [{#NAME}]: Raw space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.raw["{#ID}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.raw["{#ID}",shared]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.raw["{#ID}",total]'
+ -
+ uuid: 0ffb02e0b9144e0583489ca1d1c8d2dd
+ name: 'CPG [{#NAME}]: Snapshot administration space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sa["{#ID}",total]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sa["{#ID}",used]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sa["{#ID}",raw_total]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sa["{#ID}",raw_used]'
+ -
+ uuid: d64484828dda48cbbf7dc0f8a9c2f34d
+ name: 'CPG [{#NAME}]: Snapshot data space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sd["{#ID}",total]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sd["{#ID}",used]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sd["{#ID}",raw_total]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.sd["{#ID}",raw_used]'
+ -
+ uuid: dd590898a3644130b897f57e8837cb3a
+ name: 'CPG [{#NAME}]: User data space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.usr["{#ID}",total]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.usr["{#ID}",used]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.usr["{#ID}",raw_total]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.cpg.space.usr["{#ID}",raw_used]'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#ID}'
+ path: $.id
+ -
+ lld_macro: '{#NAME}'
+ path: $.name
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.cpgs.members
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: a83ed573e6ab40e8b7306178ddd2658b
+ name: 'Disks discovery'
+ type: DEPENDENT
+ key: hpe.primera.disks.discovery
+ delay: '0'
+ description: 'List of physical disk resources.'
+ item_prototypes:
+ -
+ uuid: 40e074af5d7f44bb8691290971fc7c5c
+ name: 'Disk [{#POSITION}]: Free size'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",free_size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Physical disk free size.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].freeSizeMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: 9bb7a86118614d339b4dee3238b261ff
+ name: 'Disk [{#POSITION}]: Firmware version'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",fw_version]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Physical disk firmware version.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].fwVersion.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: 288f7eef0a7c43afa7a3623471c92097
+ name: 'Disk [{#POSITION}]: Path A0 degraded'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",loop_a0_degraded]'
+ delay: '0'
+ history: 7d
+ description: 'Indicates if this is a degraded path for the disk.'
+ valuemap:
+ name: Boolean
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].loopA0.degraded.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: BOOL_TO_DECIMAL
+ parameters:
+ - ''
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ trigger_prototypes:
+ -
+ uuid: f1672a33f9404216a1ffdbe3fcefd0bf
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_a0_degraded])=1'
+ name: 'Disk [{#POSITION}]: Path A0 degraded'
+ priority: AVERAGE
+ description: 'Disk [{#POSITION}] path A0 in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 1e89322b49fb46bdacd22a562995f2fc
+ name: 'Disk [{#POSITION}]: Path A1 degraded'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",loop_a1_degraded]'
+ delay: '0'
+ history: 7d
+ description: 'Indicates if this is a degraded path for the disk.'
+ valuemap:
+ name: Boolean
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].loopA1.degraded.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: BOOL_TO_DECIMAL
+ parameters:
+ - ''
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ trigger_prototypes:
+ -
+ uuid: a28b1b4cdc5d4cb4afd9b7dd5e5f4f46
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_a1_degraded])=1'
+ name: 'Disk [{#POSITION}]: Path A1 degraded'
+ priority: AVERAGE
+ description: 'Disk [{#POSITION}] path A1 in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 2b56e5a6ffbd4e6189fff707d508f955
+ name: 'Disk [{#POSITION}]: Path B0 degraded'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",loop_b0_degraded]'
+ delay: '0'
+ history: 7d
+ description: 'Indicates if this is a degraded path for the disk.'
+ valuemap:
+ name: Boolean
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].loopB0.degraded.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: BOOL_TO_DECIMAL
+ parameters:
+ - ''
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ trigger_prototypes:
+ -
+ uuid: 0ff32326e7784111842198ac6457c5cc
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_b0_degraded])=1'
+ name: 'Disk [{#POSITION}]: Path B0 degraded'
+ priority: AVERAGE
+ description: 'Disk [{#POSITION}] path B0 in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: cfb88804564d4e0c914760daec53276f
+ name: 'Disk [{#POSITION}]: Path B1 degraded'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",loop_b1_degraded]'
+ delay: '0'
+ history: 7d
+ description: 'Indicates if this is a degraded path for the disk.'
+ valuemap:
+ name: Boolean
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].loopB1.degraded.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: BOOL_TO_DECIMAL
+ parameters:
+ - ''
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ trigger_prototypes:
+ -
+ uuid: d55532408f3c40408dbd05671c66b5f3
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",loop_b1_degraded])=1'
+ name: 'Disk [{#POSITION}]: Path B1 degraded'
+ priority: AVERAGE
+ description: 'Disk [{#POSITION}] path B1 in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ uuid: 1387d1129e4a418e91fb0e99179116f5
+ name: 'Disk [{#POSITION}]: Manufacturer'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",manufacturer]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Physical disk manufacturer.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].manufacturer.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: 1892d6230e244e1089a5eca8654ba2fa
+ name: 'Disk [{#POSITION}]: Model'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Manufacturer''s device ID for disk.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].model.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: 495ceedbdf1644fdb56cf56123c1ec01
+ name: 'Disk [{#POSITION}]: RPM'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",rpm]'
+ delay: '0'
+ history: 7d
+ units: '!rpm'
+ description: 'RPM of the physical disk.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].RPM.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: 07fa233e273d4d6e9813705d0afc82f5
+ name: 'Disk [{#POSITION}]: Serial number'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",serial_number]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Disk drive serial number.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].serialNumber.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ -
+ uuid: acb23a0dc2674f57bada95dd12972662
+ name: 'Disk [{#POSITION}]: State'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",state]'
+ delay: '0'
+ history: 7d
+ description: |
+ State of the physical disk:
+
+ Normal (1) - physical disk is in Normal state;
+ Degraded (2) - physical disk is not operating normally;
+ New (3) - physical disk is new, needs to be admitted;
+ Failed (4) - physical disk has failed;
+ Unknown (99) - physical disk state is unknown.
+ valuemap:
+ name: 'diskState enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].state.first()'
+ error_handler: CUSTOM_VALUE
+ error_handler_params: '99'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ trigger_prototypes:
+ -
+ uuid: d8991103e26b4ffea5fb64dc3519eb63
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=2'
+ name: 'Disk [{#POSITION}]: Degraded'
+ priority: AVERAGE
+ description: 'Disk [{#POSITION}] in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: bc8c8281c3ac4742ba8f570a56753dd3
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=3'
+ name: 'Disk [{#POSITION}]: Failed'
+ priority: HIGH
+ description: 'Disk [{#POSITION}] in failed state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: d4bda084df0b4a489fac08d1acae4e17
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.disk["{#ID}",state])=99'
+ name: 'Disk [{#POSITION}]: Unknown issue'
+ priority: INFO
+ description: 'Disk [{#POSITION}] in unknown state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 83395e3165c949e8997e93bfce0ac1d0
+ name: 'Disk [{#POSITION}]: Total size'
+ type: DEPENDENT
+ key: 'hpe.primera.disk["{#ID}",total_size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Physical disk total size.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.disks.members[?(@.id == "{#ID}")].totalSizeMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: disk
+ -
+ tag: component
+ value: storage
+ -
+ tag: disk
+ value: '{#POSITION}'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#ID}'
+ path: $.id
+ -
+ lld_macro: '{#POSITION}'
+ path: $.position
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.disks.members
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 77ae172949044c148ac8f56f05d3af33
+ name: 'Hosts discovery'
+ type: DEPENDENT
+ key: hpe.primera.hosts.discovery
+ delay: '0'
+ description: 'List of host properties.'
+ filter:
+ evaltype: AND
+ conditions:
+ -
+ macro: '{#NAME}'
+ operator: EXISTS
+ formulaid: A
+ item_prototypes:
+ -
+ uuid: 142a03a36dbf477ebbcb99994efe4246
+ name: 'Host [{#NAME}]: Comment'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",comment]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'Additional information for the host.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.comment.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ -
+ uuid: 44a06761b5174c67ace5487b7ec9f0e5
+ name: 'Host [{#NAME}]: Contact'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",contact]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The host''s owner and contact.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.contact.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ -
+ uuid: b3bd017e96d843248bbb9cb2240e861b
+ name: 'Host [{#NAME}]: IP address'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",ipaddress]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The host''s IP address.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.IPAddr.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ -
+ uuid: 367466a0f7084e579f3c11d820dc7f04
+ name: 'Host [{#NAME}]: Location'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",location]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The host''s location.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.location.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ -
+ uuid: 997e52f8f50e47738a1aefbcedaa5a82
+ name: 'Host [{#NAME}]: Model'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",model]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The host''s model.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.model.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ -
+ uuid: 4db5068c8aea4940adb5f8863d50ef47
+ name: 'Host [{#NAME}]: OS'
+ type: DEPENDENT
+ key: 'hpe.primera.host["{#ID}",os]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: 'The operating system running on the host.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.hosts.members[?(@.id == "{#ID}")].descriptors.os.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1d
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: host
+ -
+ tag: host
+ value: '{#NAME}'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#ID}'
+ path: $.id
+ -
+ lld_macro: '{#NAME}'
+ path: $.name
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.hosts.members
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: 3c9222777f2649749df76cbf61601557
+ name: 'Ports discovery'
+ type: DEPENDENT
+ key: hpe.primera.ports.discovery
+ delay: '0'
+ filter:
+ evaltype: AND
+ conditions:
+ -
+ macro: '{#TYPE}'
+ value: '3'
+ operator: NOT_MATCHES_REGEX
+ formulaid: A
+ description: 'List of ports.'
+ item_prototypes:
+ -
+ uuid: 9241e0b26de74ea49f28e1c09e15a2cd
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Failover state'
+ type: DEPENDENT
+ key: 'hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state]'
+ delay: '0'
+ history: 7d
+ description: |
+ The state of the failover operation, shown for the two ports indicated in the N:S:P and Partner columns. The value can be one of the following:
+
+ none (1) - no failover in operation;
+ failover_pending (2) - in the process of failing over to partner;
+ failed_over (3) - failed over to partner;
+ active (4) - the partner port is failed over to this port;
+ active_down (5) - the partner port is failed over to this port, but this port is down;
+ active_failed (6) - the partner port is failed over to this port, but this port is down;
+ failback_pending (7) - in the process of failing back from partner.
+ valuemap:
+ name: 'portFailOverState enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].failoverState.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NODE}:{#SLOT}:{#CARD.PORT}'
+ trigger_prototypes:
+ -
+ uuid: 65f3f3b098984842b5246bfb5842bc78
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state])<>1 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",failover_state])<>4'
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Failover state is {ITEM.VALUE1}'
+ priority: AVERAGE
+ description: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}] has a failover error.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 8abba6d6f6e749b0be277056421a1958
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Hardware type'
+ type: DEPENDENT
+ key: 'hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",hw_type]'
+ delay: '0'
+ history: 7d
+ description: |
+ Hardware type:
+
+ FC (1) - Fibre channel HBA;
+ ETH (2) - Ethernet NIC;
+ iSCSI (3) - iSCSI HBA;
+ CNA (4) - Converged network adapter;
+ SAS (5) - SAS HBA;
+ COMBO (6) - Combo card;
+ NVME (7) - NVMe drive;
+ UNKNOWN (99) - unknown hardware type.
+ valuemap:
+ name: 'hardwareType enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].hardwareType.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NODE}:{#SLOT}:{#CARD.PORT}'
+ -
+ uuid: 55119ce474024203ac039f4aa797dd4c
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state'
+ type: DEPENDENT
+ key: 'hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state]'
+ delay: '0'
+ history: 7d
+ description: |
+ Port link state:
+
+ CONFIG_WAIT (1) - configuration wait;
+ ALPA_WAIT (2) - ALPA wait;
+ LOGIN_WAIT (3) - login wait;
+ READY (4) - link is ready;
+ LOSS_SYNC (5) - link is loss sync;
+ ERROR_STATE (6) - in error state;
+ XXX (7) - xxx;
+ NONPARTICIPATE (8) - link did not participate;
+ COREDUMP (9) - taking coredump;
+ OFFLINE (10) - link is offline;
+ FWDEAD (11) - firmware is dead;
+ IDLE_FOR_RESET (12) - link is idle for reset;
+ DHCP_IN_PROGRESS (13) - DHCP is in progress;
+ PENDING_RESET (14) - link reset is pending;
+ NEW (15) - link is new. This value is applicable only to virtual ports;
+ DISABLED (16) - link is disabled. This value is applicable only to virtual ports;
+ DOWN (17) - link is down. This value is applicable only to virtual ports;
+ FAILED (18) - link has failed. This value is applicable only to virtual ports;
+ PURGING (19) - link is purging. This value is applicable only to virtual ports.
+ valuemap:
+ name: 'portLinkState enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].linkState.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NODE}:{#SLOT}:{#CARD.PORT}'
+ trigger_prototypes:
+ -
+ uuid: c7ee19ea175d4c63a9ae67e0ab59253b
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>4 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>1 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>3 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>13 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>15 and last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])<>16'
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state is {ITEM.VALUE1}'
+ priority: HIGH
+ description: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}] is not in ready state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: f0c8851f843e41dcb6820c943efcbe2f
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=1 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=3 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=13 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=15 or last(/HPE Primera by HTTP/hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",link_state])=16'
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Link state is {ITEM.VALUE1}'
+ priority: AVERAGE
+ description: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}] is not in ready state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: c049e53b25bb4cb58cabbff1d91b3e88
+ name: 'Port [{#NODE}:{#SLOT}:{#CARD.PORT}]: Type'
+ type: DEPENDENT
+ key: 'hpe.primera.port["{#NODE}:{#SLOT}:{#CARD.PORT}",type]'
+ delay: '0'
+ history: 7d
+ description: |
+ Port connection type:
+
+ HOST (1) - FC port connected to hosts or fabric;
+ DISK (2) - FC port connected to disks;
+ FREE (3) - port is not connected to hosts or disks;
+ IPORT (4) - port is in iport mode;
+ RCFC (5) - FC port used for remote copy;
+ PEER (6) - FC port used for data migration;
+ RCIP (7) - IP (Ethernet) port used for remote copy;
+ ISCSI (8) - iSCSI (Ethernet) port connected to hosts;
+ CNA (9) - CNA port, which can be FCoE or iSCSI;
+ FS (10) - Ethernet File Persona ports.
+ valuemap:
+ name: 'portConnType enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.ports.members[?(@.portPos.node == "{#NODE}" && @.portPos.slot == "{#SLOT}" && @.portPos.cardPort == "{#CARD.PORT}")].type.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: port
+ -
+ tag: port
+ value: '{#NODE}:{#SLOT}:{#CARD.PORT}'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#CARD.PORT}'
+ path: $.portPos.cardPort
+ -
+ lld_macro: '{#NODE}'
+ path: $.portPos.node
+ -
+ lld_macro: '{#SLOT}'
+ path: $.portPos.slot
+ -
+ lld_macro: '{#TYPE}'
+ path: $.type
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.ports.members
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: b47a6afafca6486ea4ffb12dd3322bab
+ name: 'Tasks discovery'
+ type: DEPENDENT
+ key: hpe.primera.tasks.discovery
+ delay: '0'
+ filter:
+ evaltype: AND
+ conditions:
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.MATCHES}'
+ formulaid: A
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.NOT_MATCHES}'
+ operator: NOT_MATCHES_REGEX
+ formulaid: B
+ -
+ macro: '{#TYPE}'
+ value: '{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.MATCHES}'
+ formulaid: C
+ -
+ macro: '{#TYPE}'
+ value: '{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.NOT_MATCHES}'
+ operator: NOT_MATCHES_REGEX
+ formulaid: D
+ lifetime: 1d
+ description: 'List of tasks started within the last 24 hours.'
+ item_prototypes:
+ -
+ uuid: cbcdf169dcf646cb959206bbb6cf3642
+ name: 'Task [{#NAME}]: Finish time'
+ type: DEPENDENT
+ key: 'hpe.primera.task["{#ID}",finish_time]'
+ delay: '0'
+ history: 7d
+ units: unixtime
+ description: 'Task finish time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.tasks[?(@.id == "{#ID}")].finishTime.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ type: NOT_MATCHES_REGEX
+ parameters:
+ - ^-$
+ error_handler: DISCARD_VALUE
+ -
+ type: JAVASCRIPT
+ parameters:
+ - |
+ // Rebuild the 'date time offset' parts as an ISO 8601 string and convert to a unix timestamp in seconds.
+ var raw_date = value.split(' ');
+
+ return Date.parse(raw_date[0] + 'T' + raw_date[1] + raw_date[2] + ':00') / 1000;
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: task
+ -
+ tag: task
+ value: '{#NAME}'
+ -
+ uuid: 66140b134d954319a96eb17750da6b7c
+ name: 'Task [{#NAME}]: Start time'
+ type: DEPENDENT
+ key: 'hpe.primera.task["{#ID}",start_time]'
+ delay: '0'
+ history: 7d
+ units: unixtime
+ description: 'Task start time.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.tasks[?(@.id == "{#ID}")].startTime.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ type: JAVASCRIPT
+ parameters:
+ - |
+ // Rebuild the 'date time offset' parts as an ISO 8601 string and convert to a unix timestamp in seconds.
+ var raw_date = value.split(' ');
+
+ return Date.parse(raw_date[0] + 'T' + raw_date[1] + raw_date[2] + ':00') / 1000;
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: task
+ -
+ tag: task
+ value: '{#NAME}'
+ -
+ uuid: e01b3c84a6594e419c358c7ea297159b
+ name: 'Task [{#NAME}]: Status'
+ type: DEPENDENT
+ key: 'hpe.primera.task["{#ID}",status]'
+ delay: '0'
+ history: 7d
+ description: |
+ Task status:
+
+ DONE (1) - task is finished;
+ ACTIVE (2) - task is in progress;
+ CANCELLED (3) - task is canceled;
+ FAILED (4) - task failed.
+ valuemap:
+ name: 'taskStatus enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.tasks[?(@.id == "{#ID}")].status.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: task
+ -
+ tag: task
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: 63340c376d86492198e00d7ae10f063c
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.task["{#ID}",status])=3'
+ name: 'Task [{#NAME}]: Cancelled'
+ priority: INFO
+ description: 'Task [{#NAME}] is cancelled.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: af326c0f259144d28ebeb60e19bae903
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.task["{#ID}",status])=4'
+ name: 'Task [{#NAME}]: Failed'
+ priority: AVERAGE
+ description: 'Task [{#NAME}] has failed.'
+ tags:
+ -
+ tag: scope
+ value: notice
+ -
+ uuid: a67262cd7be642b9b56194d8bbb7e928
+ name: 'Task [{#NAME}]: Type'
+ type: DEPENDENT
+ key: 'hpe.primera.task["{#ID}",type]'
+ delay: '0'
+ history: 7d
+ description: |
+ Task type:
+
+ VV_COPY (1) - track the physical copy operations;
+ PHYS_COPY_RESYNC (2) - track physical copy resynchronization operations;
+ MOVE_REGIONS (3) - track region move operations;
+ PROMOTE_SV (4) - track virtual-copy promotions;
+ REMOTE_COPY_SYNC (5) - track remote copy group synchronizations;
+ REMOTE_COPY_REVERSE (6) - track the reversal of a remote copy group;
+ REMOTE_COPY_FAILOVER (7) - track the change-over of a secondary volume group to a primary volume group;
+ REMOTE_COPY_RECOVER (8) - track synchronization start after a failover operation from original secondary cluster to original primary cluster;
+ REMOTE_COPY_RESTORE (9) - track the restoration process for groups that have already been recovered;
+ COMPACT_CPG (10) - track space consolidation in CPGs;
+ COMPACT_IDS (11) - track space consolidation in logical disks;
+ SNAPSHOT_ACCOUNTING (12) - track progress of snapshot space usage accounting;
+ CHECK_VV (13) - track the progress of the check-volume operation;
+ SCHEDULED_TASK (14) - track tasks that have been executed by the system scheduler;
+ SYSTEM_TASK (15) - track tasks that are periodically run by the storage system;
+ BACKGROUND_TASK (16) - track commands started using the starttask command;
+ IMPORT_VV (17) - track tasks that migrate data to the local storage system;
+ ONLINE_COPY (18) - track physical copy of the volume while online (createvvcopy-online command);
+ CONVERT_VV (19) - track tasks that convert a volume from an FPVV to a TPVV, and the reverse;
+ BACKGROUND_COMMAND (20) - track background command tasks;
+ CLX_SYNC (21) - track CLX synchronization tasks;
+ CLX_RECOVERY (22) - track CLX recovery tasks;
+ TUNE_SD (23) - tune copy space;
+ TUNE_VV (24) - tune virtual volume;
+ TUNE_VV_ROLLBACK (25) - tune virtual volume rollback;
+ TUNE_VV_RESTART (26) - tune virtual volume restart;
+ SYSTEM_TUNING (27) - system tuning;
+ NODE_RESCUE (28) - node rescue;
+ REPAIR_SYNC (29) - remote copy repair sync;
+ REMOTE_COPY_SWOVER (30) - remote copy switchover;
+ DEFRAGMENTATION (31) - defragmentation;
+ ENCRYPTION_CHANGE (32) - encryption change;
+ REMOTE_COPY_FAILSAFE (33) - remote copy failsafe;
+ TUNE_TPVV (34) - tune thin virtual volume;
+ REMOTE_COPY_CHG_MODE (35) - remote copy change mode;
+ ONLINE_PROMOTE (37) - online promote snap;
+ RELOCATE_PD (38) - relocate PD;
+ PERIODIC_CSS (39) - remote copy periodic CSS;
+ TUNEVV_LARGE (40) - tune large virtual volume;
+ SD_META_FIXER (41) - compression SD meta fixer;
+ DEDUP_DRYRUN (42) - preview dedup ratio;
+ COMPR_DRYRUN (43) - compression estimation;
+ DEDUP_COMPR_DRYRUN (44) - compression and dedup estimation;
+ UNKNOWN (99) - unknown task type.
+ valuemap:
+ name: 'taskType enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.tasks[?(@.id == "{#ID}")].type.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: task
+ -
+ tag: task
+ value: '{#NAME}'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#ID}'
+ path: $.id
+ -
+ lld_macro: '{#NAME}'
+ path: $.name
+ -
+ lld_macro: '{#TYPE}'
+ path: $.type
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.tasks
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ -
+ uuid: eb73fcc415c54ac18840d2655f048f6c
+ name: 'Volumes discovery'
+ type: DEPENDENT
+ key: hpe.primera.volumes.discovery
+ delay: '0'
+ filter:
+ evaltype: AND
+ conditions:
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.VOLUME.NAME.MATCHES}'
+ formulaid: A
+ -
+ macro: '{#NAME}'
+ value: '{$HPE.PRIMERA.VOLUME.NAME.NOT_MATCHES}'
+ operator: NOT_MATCHES_REGEX
+ formulaid: B
+ description: 'List of storage volume resources.'
+ item_prototypes:
+ -
+ uuid: 40db4c8f6d85414e843c97770225f93d
+ name: 'Volume [{#NAME}]: Compaction ratio'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",compaction]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ description: 'The compaction ratio indicates the overall amount of storage space saved with thin technology.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.compaction.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: ab2a583c4b4049a6ac8b7bbc02bda8f5
+ name: 'Volume [{#NAME}]: Storage space saved using compression'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",compression]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ description: 'Indicates the amount of storage space saved using compression.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.compression.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 1c4d78b2dcd64efbbf710ef602a94573
+ name: 'Volume [{#NAME}]: Storage space saved using deduplication'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",deduplication]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ description: 'Indicates the amount of storage space saved using deduplication.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.deduplication.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 3b88e49e88484fe4a77e2a96f6d48322
+ name: 'Volume [{#NAME}]: Overprovisioning ratio'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",overprovisioning]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ description: 'Overprovisioning capacity efficiency ratio.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.overProvisioning.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: f584938e60f94c46b4ed28cc614c797d
+ name: 'Volume [{#NAME}]: Storage space saved using deduplication and compression'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",reduction]'
+ delay: '0'
+ history: 7d
+ value_type: FLOAT
+ description: 'Indicates the amount of storage space saved using deduplication and compression together.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].capacityEfficiency.dataReduction.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 1h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: bb78eda6a941407581f78cf29ef2b647
+ name: 'Volume [{#NAME}]: Administrative space: Free'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.admin["{#ID}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Free administrative space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].adminSpace.freeMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: e4f9f8f5c1cd494896eba973b072fc57
+ name: 'Volume [{#NAME}]: Administrative space: Raw reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.admin["{#ID}",raw_reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw reserved administrative space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].adminSpace.rawReservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: e5a93a042a3b41bab4cf59dc71ec66bf
+ name: 'Volume [{#NAME}]: Administrative space: Reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.admin["{#ID}",reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Reserved administrative space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].adminSpace.reservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 67e182afc0124cf5913b0499317a7966
+ name: 'Volume [{#NAME}]: Administrative space: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.admin["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Used administrative space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].adminSpace.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 5d4659c72ed3492da143ad9c37e71360
+ name: 'Volume [{#NAME}]: Snapshot space: Free'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Free snapshot space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.freeMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: a2c22eed1c004bcc9292b945b5038858
+ name: 'Volume [{#NAME}]: Snapshot space: Raw reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",raw_reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw reserved snapshot space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.rawReservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: a37265c4598f4179bdcfd816769a1d9b
+ name: 'Volume [{#NAME}]: Snapshot space: Reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Reserved snapshot space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.reservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: a278f5ec08c747b085ec53a36357539c
+ name: 'Volume [{#NAME}]: Snapshot space: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Used snapshot space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].snapshotSpace.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 64825e4092c6450a8ea7fb7bce2d85ce
+ name: 'Volume [{#NAME}]: Total reserved space'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.total["{#ID}",reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total reserved space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].totalReservedMiB.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: af15ec5befd146afbfb2b9cc017d03be
+ name: 'Volume [{#NAME}]: Total space'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.total["{#ID}",size]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Virtual size of volume.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].sizeMiB.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 6000ab524b394e65afe111b65f7b6fd8
+ name: 'Volume [{#NAME}]: Total used space'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.total["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Total used space. Sum of used user space and used snapshot space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].totalUsedMiB.first()'
+ error_handler: DISCARD_VALUE
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 43c278563b174005ac5302ee48e0cd30
+ name: 'Volume [{#NAME}]: User space: Free'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.user["{#ID}",free]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Free user space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].userSpace.freeMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 97aaefe8dffd4d2eb83f908ac8ad775b
+ name: 'Volume [{#NAME}]: User space: Raw reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.user["{#ID}",raw_reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Raw reserved user space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].userSpace.rawReservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 135ad5457781492db1cec36787151a71
+ name: 'Volume [{#NAME}]: User space: Reserved'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.user["{#ID}",reserved]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Reserved user space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].userSpace.reservedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 12h
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 3adf03216c4f442693fccbb991c0de3d
+ name: 'Volume [{#NAME}]: User space: Used'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.space.user["{#ID}",used]'
+ delay: '0'
+ history: 7d
+ units: B
+ description: 'Used user space.'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].userSpace.usedMiB.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 10m
+ -
+ type: MULTIPLIER
+ parameters:
+ - '1048576'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: beb17415bd07492d83944da714c492e7
+ name: 'Volume [{#NAME}]: Compression state'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.state["{#ID}",compression]'
+ delay: '0'
+ history: 7d
+ description: |
+ Volume compression state:
+
+ YES (1) - compression is enabled on the volume;
+ NO (2) - compression is disabled on the volume;
+ OFF (3) - compression is turned off;
+ NA (4) - compression is not available on the volume.
+ valuemap:
+ name: 'Volume compressionState enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].compressionState.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 3962d07122a0460fa36c1b151a87717b
+ name: 'Volume [{#NAME}]: Deduplication state'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.state["{#ID}",deduplication]'
+ delay: '0'
+ history: 7d
+ description: |
+ Volume deduplication state:
+
+ YES (1) - deduplication is enabled on the volume;
+ NO (2) - deduplication is disabled on the volume;
+ NA (3) - deduplication is not available;
+ OFF (4) - deduplication is turned off.
+ valuemap:
+ name: 'Volume deduplicationState enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].deduplicationState.first()'
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 83222faf4e3e414789e028e0b17350c6
+ name: 'Volume [{#NAME}]: Degraded state'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.state["{#ID}",degraded]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: |
+ Volume detailed state:
+
+ LDS_NOT_STARTED (1) - LDs not started.
+ NOT_STARTED (2) - VV not started.
+ NEEDS_CHECK (3) - check for consistency.
+ NEEDS_MAINT_CHECK (4) - maintenance check is required.
+ INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.
+ SNAPDATA_INVALID (6) - invalid snapshot data.
+ PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.
+ STALE (8) - parts of the VV contain old data because of a copy-on-write operation.
+ COPY_FAILED (9) - a promote or copy operation to this volume failed.
+ DEGRADED_AVAIL (10) - degraded due to availability.
+ DEGRADED_PERF (11) - degraded due to performance.
+ PROMOTING (12) - volume is the current target of a promote operation.
+ COPY_TARGET (13) - volume is the current target of a physical copy operation.
+ RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.
+ TUNING (15) - volume tuning is in progress.
+ CLOSING (16) - volume is closing.
+ REMOVING (17) - removing the volume.
+ REMOVING_RETRY (18) - retrying a volume removal operation.
+ CREATING (19) - creating a volume.
+ COPY_SOURCE (20) - copy source.
+ IMPORTING (21) - importing a volume.
+ CONVERTING (22) - converting a volume.
+ INVALID (23) - invalid.
+ EXCLUSIVE (24) - local storage system has exclusive access to the volume.
+ CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.
+ STANDBY (26) - volume in standby mode.
+ SD_META_INCONSISTENT (27) - SD Meta Inconsistent.
+ SD_NEEDS_FIX (28) - SD needs fix.
+ SD_META_FIXING (29) - SD meta fix.
+ UNKNOWN (999) - unknown state.
+ NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.
+ valuemap:
+ name: 'Volume detailed state enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].degradedStates.first()'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 462e4b491dd94c78b299178af6d34ca0
+ name: 'Volume [{#NAME}]: Failed state'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.state["{#ID}",failed]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: |
+ Volume detailed state:
+
+ LDS_NOT_STARTED (1) - LDs not started.
+ NOT_STARTED (2) - VV not started.
+ NEEDS_CHECK (3) - check for consistency.
+ NEEDS_MAINT_CHECK (4) - maintenance check is required.
+ INTERNAL_CONSISTENCY_ERROR (5) - internal consistency error.
+ SNAPDATA_INVALID (6) - invalid snapshot data.
+ PRESERVED (7) - unavailable LD sets due to missing chunklets. Preserved remaining VV data.
+ STALE (8) - parts of the VV contain old data because of a copy-on-write operation.
+ COPY_FAILED (9) - a promote or copy operation to this volume failed.
+ DEGRADED_AVAIL (10) - degraded due to availability.
+ DEGRADED_PERF (11) - degraded due to performance.
+ PROMOTING (12) - volume is the current target of a promote operation.
+ COPY_TARGET (13) - volume is the current target of a physical copy operation.
+ RESYNC_TARGET (14) - volume is the current target of a resynchronized copy operation.
+ TUNING (15) - volume tuning is in progress.
+ CLOSING (16) - volume is closing.
+ REMOVING (17) - removing the volume.
+ REMOVING_RETRY (18) - retrying a volume removal operation.
+ CREATING (19) - creating a volume.
+ COPY_SOURCE (20) - copy source.
+ IMPORTING (21) - importing a volume.
+ CONVERTING (22) - converting a volume.
+ INVALID (23) - invalid.
+ EXCLUSIVE (24) - local storage system has exclusive access to the volume.
+ CONSISTENT (25) - volume is being imported consistently along with other volumes in the VV set.
+ STANDBY (26) - volume in standby mode.
+ SD_META_INCONSISTENT (27) - SD Meta Inconsistent.
+ SD_NEEDS_FIX (28) - SD needs fix.
+ SD_META_FIXING (29) - SD meta fix.
+ UNKNOWN (999) - unknown state.
+ NOT_SUPPORTED_BY_WSAPI (1000) - state not supported by WSAPI.
+ valuemap:
+ name: 'Volume detailed state enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].failedStates.first()'
+ -
+ type: JAVASCRIPT
+ parameters:
+ - 'return JSON.stringify(JSON.parse(value));'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ -
+ uuid: 4d4e34fdbac84cada109cbfe9b69812c
+ name: 'Volume [{#NAME}]: State'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.state["{#ID}"]'
+ delay: '0'
+ history: 7d
+ description: |
+ State of the volume:
+
+ NORMAL (1) - normal operation;
+ DEGRADED (2) - degraded state;
+ FAILED (3) - abnormal operation;
+ UNKNOWN (99) - unknown state.
+ valuemap:
+ name: 'State enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].state.first()'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ trigger_prototypes:
+ -
+ uuid: c91920ca3ceb457cb9e2db0bb70d7fe0
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.volume.state["{#ID}"])=2'
+ name: 'Volume [{#NAME}]: Degraded'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: AVERAGE
+ description: 'Volume [{#NAME}] is in degraded state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 394b5bd072ac41acb0702601c5d5f049
+ expression: 'last(/HPE Primera by HTTP/hpe.primera.volume.state["{#ID}"])=3'
+ name: 'Volume [{#NAME}]: Failed'
+ opdata: 'Current value: {ITEM.LASTVALUE1}'
+ priority: HIGH
+ description: 'Volume [{#NAME}] is in failed state.'
+ tags:
+ -
+ tag: scope
+ value: availability
+ -
+ tag: scope
+ value: capacity
+ -
+ uuid: 582544eb48d04a35ab03a9d01901feb9
+ name: 'Volume [{#NAME}]: Remote copy status'
+ type: DEPENDENT
+ key: 'hpe.primera.volume.status["{#ID}",rcopy]'
+ delay: '0'
+ history: 7d
+ trends: '0'
+ value_type: CHAR
+ description: |
+ Remote copy status of the volume:
+
+ NONE (1) - volume is not associated with remote copy;
+ PRIMARY (2) - volume is the primary copy;
+ SECONDARY (3) - volume is the secondary copy;
+ SNAP (4) - volume is the remote copy snapshot;
+ SYNC (5) - volume is a remote copy snapshot being used for synchronization;
+ DELETE (6) - volume is a remote copy snapshot that is marked for deletion;
+ UNKNOWN (99) - remote copy status is unknown for this volume.
+ valuemap:
+ name: 'rcopyStatus enum'
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - '$.volumes.members[?(@.id == "{#ID}")].rcopyStatus.first()'
+ master_item:
+ key: hpe.primera.data.get
+ tags:
+ -
+ tag: component
+ value: storage
+ -
+ tag: component
+ value: volume
+ -
+ tag: volume
+ value: '{#NAME}'
+ graph_prototypes:
+ -
+ uuid: 8c7139d2b7d94773ad6ef813c7fa59c9
+ name: 'Volume [{#NAME}]: Administrative space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.admin["{#ID}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.admin["{#ID}",raw_reserved]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.admin["{#ID}",reserved]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.admin["{#ID}",used]'
+ -
+ uuid: c74b88d4e61d4264b804965854eb1da1
+ name: 'Volume [{#NAME}]: Capacity efficiency: Ratio'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",compaction]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",overprovisioning]'
+ -
+ uuid: d2a75ebb20c94eb4ab5e6a9b13d1d439
+ name: 'Volume [{#NAME}]: Capacity efficiency: Space saved'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",compression]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",deduplication]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.capacity.efficiency["{#ID}",reduction]'
+ -
+ uuid: 7214605009354fd382368ec8699d5474
+ name: 'Volume [{#NAME}]: Snapshot space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",raw_reserved]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",reserved]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.snapshot["{#ID}",used]'
+ -
+ uuid: 0ad9f609419340168bf7f49de98e0135
+ name: 'Volume [{#NAME}]: User space'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.user["{#ID}",free]'
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.user["{#ID}",raw_reserved]'
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.user["{#ID}",reserved]'
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: 'hpe.primera.volume.space.user["{#ID}",used]'
+ master_item:
+ key: hpe.primera.data.get
+ lld_macro_paths:
+ -
+ lld_macro: '{#ID}'
+ path: $.id
+ -
+ lld_macro: '{#NAME}'
+ path: $.name
+ preprocessing:
+ -
+ type: JSONPATH
+ parameters:
+ - $.volumes.members
+ -
+ type: DISCARD_UNCHANGED_HEARTBEAT
+ parameters:
+ - 6h
+ tags:
+ -
+ tag: class
+ value: storage
+ -
+ tag: target
+ value: hpe
+ -
+ tag: target
+ value: primera
+ macros:
+ -
+ macro: '{$HPE.PRIMERA.API.PASSWORD}'
+ type: SECRET_TEXT
+ description: 'Specify password for WSAPI.'
+ -
+ macro: '{$HPE.PRIMERA.API.PORT}'
+ value: '443'
+ description: 'The WSAPI port.'
+ -
+ macro: '{$HPE.PRIMERA.API.SCHEME}'
+ value: https
+ description: 'The WSAPI scheme (http/https).'
+ -
+ macro: '{$HPE.PRIMERA.API.USERNAME}'
+ value: zabbix
+ description: 'Specify user name for WSAPI.'
+ -
+ macro: '{$HPE.PRIMERA.CPG.NAME.MATCHES}'
+ value: '.*'
+ description: 'This macro is used in the filters of the CPG discovery rule.'
+ -
+ macro: '{$HPE.PRIMERA.CPG.NAME.NOT_MATCHES}'
+ value: CHANGE_IF_NEEDED
+ description: 'This macro is used in the filters of the CPG discovery rule.'
+ -
+ macro: '{$HPE.PRIMERA.DATA.TIMEOUT}'
+ value: 15s
+ description: 'Response timeout for WSAPI.'
+ -
+ macro: '{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.MATCHES}'
+ value: CHANGE_IF_NEEDED
+ description: 'Filter of discoverable tasks by name.'
+ -
+ macro: '{$HPE.PRIMERA.LLD.FILTER.TASK.NAME.NOT_MATCHES}'
+ value: '.*'
+ description: 'Filter to exclude discovered tasks by name.'
+ -
+ macro: '{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.MATCHES}'
+ value: '.*'
+ description: 'Filter of discoverable tasks by type.'
+ -
+ macro: '{$HPE.PRIMERA.LLD.FILTER.TASK.TYPE.NOT_MATCHES}'
+ value: CHANGE_IF_NEEDED
+ description: 'Filter to exclude discovered tasks by type.'
+ -
+ macro: '{$HPE.PRIMERA.VOLUME.NAME.MATCHES}'
+ value: '.*'
+ description: 'This macro is used in the filters of the volume discovery rule.'
+ -
+ macro: '{$HPE.PRIMERA.VOLUME.NAME.NOT_MATCHES}'
+ value: ^(admin|.srdata|.mgmtdata)$
+ description: 'This macro is used in the filters of the volume discovery rule.'
+ valuemaps:
+ -
+ uuid: 79ba0611293541f29f8b43b34e64465d
+ name: Boolean
+ mappings:
+ -
+ value: '0'
+ newvalue: 'No'
+ -
+ value: '1'
+ newvalue: 'Yes'
+ -
+ uuid: fd9a3483b02f45c6836b9a126b669402
+ name: 'diskState enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: Normal
+ -
+ value: '2'
+ newvalue: Degraded
+ -
+ value: '3'
+ newvalue: New
+ -
+ value: '4'
+ newvalue: Failed
+ -
+ value: '99'
+ newvalue: Unknown
+ -
+ uuid: 7513c4c923884ed4b8e35ee9cdf4f627
+ name: 'hardwareType enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: FC
+ -
+ value: '2'
+ newvalue: Eth
+ -
+ value: '3'
+ newvalue: iSCSI
+ -
+ value: '4'
+ newvalue: CNA
+ -
+ value: '5'
+ newvalue: SAS
+ -
+ value: '6'
+ newvalue: Combo
+ -
+ value: '7'
+ newvalue: NVMe
+ -
+ value: '8'
+ newvalue: Unknown
+ -
+ uuid: 5d243add5b534ebfac8ef95d55c4c8c9
+ name: 'portConnType enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: Host
+ -
+ value: '2'
+ newvalue: Disk
+ -
+ value: '3'
+ newvalue: Free
+ -
+ value: '4'
+ newvalue: Iport
+ -
+ value: '5'
+ newvalue: RCFC
+ -
+ value: '6'
+ newvalue: Peer
+ -
+ value: '7'
+ newvalue: RCIP
+ -
+ value: '8'
+ newvalue: ISCSI
+ -
+ value: '9'
+ newvalue: CNA
+ -
+ value: '10'
+ newvalue: FS
+ -
+ uuid: 5762248dd10143a3945c989f0fb73b47
+ name: 'portFailOverState enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: None
+ -
+ value: '2'
+ newvalue: 'Failover pending'
+ -
+ value: '3'
+ newvalue: 'Failed over'
+ -
+ value: '4'
+ newvalue: Active
+ -
+ value: '5'
+ newvalue: 'Active down'
+ -
+ value: '6'
+ newvalue: 'Active failed'
+ -
+ value: '7'
+ newvalue: 'Failback pending'
+ -
+ uuid: 5a6cdc765c254f17b136c4267fc71349
+ name: 'portLinkState enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: 'Config wait'
+ -
+ value: '2'
+ newvalue: 'ALPA wait'
+ -
+ value: '3'
+ newvalue: 'Login wait'
+ -
+ value: '4'
+ newvalue: 'Link is ready'
+ -
+ value: '5'
+ newvalue: 'Link is loss sync'
+ -
+ value: '6'
+ newvalue: 'In error state'
+ -
+ value: '7'
+ newvalue: xxx
+ -
+ value: '8'
+ newvalue: 'Non participate'
+ -
+ value: '9'
+ newvalue: 'Core dump'
+ -
+ value: '10'
+ newvalue: Offline
+ -
+ value: '11'
+ newvalue: 'FW dead'
+ -
+ value: '12'
+ newvalue: 'Idle for reset'
+ -
+ value: '13'
+ newvalue: 'DHCP in progress'
+ -
+ value: '14'
+ newvalue: 'Pending reset'
+ -
+ value: '15'
+ newvalue: New
+ -
+ value: '16'
+ newvalue: Disabled
+ -
+ value: '17'
+ newvalue: Down
+ -
+ value: '18'
+ newvalue: Failed
+ -
+ value: '19'
+ newvalue: Purging
+ -
+ uuid: eb36574d642d4f9ea51688b4ff971c91
+ name: 'rcopyStatus enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: None
+ -
+ value: '2'
+ newvalue: Primary
+ -
+ value: '3'
+ newvalue: Secondary
+ -
+ value: '4'
+ newvalue: Snap
+ -
+ value: '5'
+ newvalue: Sync
+ -
+ value: '6'
+ newvalue: Delete
+ -
+ value: '99'
+ newvalue: Unknown
+ -
+ uuid: aaa5a863e8524a7088e64a51f5976c98
+ name: 'Service state'
+ mappings:
+ -
+ value: '0'
+ newvalue: Down
+ -
+ value: '1'
+ newvalue: Up
+ -
+ uuid: 86e4337cbc86423fb9f605a9fb3b25b1
+ name: 'State enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: Normal
+ -
+ value: '2'
+ newvalue: Degraded
+ -
+ value: '3'
+ newvalue: Failed
+ -
+ value: '99'
+ newvalue: Unknown
+ -
+ uuid: 5cfd4442b6244399b7aa8b57e9816a4e
+ name: 'taskStatus enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: Done
+ -
+ value: '2'
+ newvalue: Active
+ -
+ value: '3'
+ newvalue: Cancelled
+ -
+ value: '4'
+ newvalue: Failed
+ -
+ uuid: 9a8e1dbbb8f7497c9492fc941fde7177
+ name: 'taskType enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: 'VV copy'
+ -
+ value: '2'
+ newvalue: 'Phys copy resync'
+ -
+ value: '3'
+ newvalue: 'Move regions'
+ -
+ value: '4'
+ newvalue: 'Promote SV'
+ -
+ value: '5'
+ newvalue: 'Remote copy sync'
+ -
+ value: '6'
+ newvalue: 'Remote copy reverse'
+ -
+ value: '7'
+ newvalue: 'Remote copy failover'
+ -
+ value: '8'
+ newvalue: 'Remote copy recover'
+ -
+ value: '9'
+ newvalue: 'Remote copy restore'
+ -
+ value: '10'
+ newvalue: 'Compact CPG'
+ -
+ value: '11'
+ newvalue: 'Compact IDS'
+ -
+ value: '12'
+ newvalue: 'Snapshot accounting'
+ -
+ value: '13'
+ newvalue: 'Check VV'
+ -
+ value: '14'
+ newvalue: 'Scheduled task'
+ -
+ value: '15'
+ newvalue: 'System task'
+ -
+ value: '16'
+ newvalue: 'Background task'
+ -
+ value: '17'
+ newvalue: 'Import VV'
+ -
+ value: '18'
+ newvalue: 'Online copy'
+ -
+ value: '19'
+ newvalue: 'Convert VV'
+ -
+ value: '20'
+ newvalue: 'Background command'
+ -
+ value: '21'
+ newvalue: 'CLX sync'
+ -
+ value: '22'
+ newvalue: 'CLX recovery'
+ -
+ value: '23'
+ newvalue: 'Tune SD'
+ -
+ value: '24'
+ newvalue: 'Tune VV'
+ -
+ value: '25'
+ newvalue: 'Tune VV rollback'
+ -
+ value: '26'
+ newvalue: 'Tune VV restart'
+ -
+ value: '27'
+ newvalue: 'System tuning'
+ -
+ value: '28'
+ newvalue: 'Node rescue'
+ -
+ value: '29'
+ newvalue: 'Repair sync'
+ -
+ value: '30'
+ newvalue: 'Remote copy switchover'
+ -
+ value: '31'
+ newvalue: Defragmentation
+ -
+ value: '32'
+ newvalue: 'Encryption change'
+ -
+ value: '33'
+ newvalue: 'Remote copy failsafe'
+ -
+ value: '34'
+ newvalue: 'Tune TPVV'
+ -
+ value: '35'
+ newvalue: 'Remote copy change mode'
+ -
+ value: '37'
+ newvalue: 'Online promote'
+ -
+ value: '38'
+ newvalue: 'Relocate PD'
+ -
+ value: '39'
+ newvalue: 'Periodic CSS'
+ -
+ value: '40'
+ newvalue: 'Tune VV large'
+ -
+ value: '41'
+ newvalue: 'SD meta fixer'
+ -
+ value: '42'
+ newvalue: 'Dedup dryrun'
+ -
+ value: '43'
+ newvalue: 'Compr dryrun'
+ -
+ value: '44'
+ newvalue: 'Dedup compr dryrun'
+ -
+ value: '99'
+ newvalue: Unknown
+ -
+ uuid: f529111642364bb3a23637adc554e592
+ name: 'Volume compressionState enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: 'Yes'
+ -
+ value: '2'
+ newvalue: 'No'
+ -
+ value: '3'
+ newvalue: 'Off'
+ -
+ value: '4'
+ newvalue: NA
+ -
+ uuid: 716912c7b7d94f8e97fbf911edc9578e
+ name: 'Volume deduplicationState enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: 'Yes'
+ -
+ value: '2'
+ newvalue: 'No'
+ -
+ value: '3'
+ newvalue: NA
+ -
+ value: '4'
+ newvalue: 'Off'
+ -
+ uuid: 6a2355e32bf54483b252f1aaf170aa45
+ name: 'Volume detailed state enum'
+ mappings:
+ -
+ value: '1'
+ newvalue: 'LDS not started'
+ -
+ value: '2'
+ newvalue: 'VV not started'
+ -
+ value: '3'
+ newvalue: 'Needs check'
+ -
+ value: '4'
+ newvalue: 'Needs maint check'
+ -
+ value: '5'
+ newvalue: 'Internal consistency error'
+ -
+ value: '6'
+ newvalue: 'Snapdata invalid'
+ -
+ value: '7'
+ newvalue: Preserved
+ -
+ value: '8'
+ newvalue: Stale
+ -
+ value: '9'
+ newvalue: 'Copy failed'
+ -
+ value: '10'
+ newvalue: 'Degraded avail'
+ -
+ value: '11'
+ newvalue: 'Degraded perf'
+ -
+ value: '12'
+ newvalue: Promoting
+ -
+ value: '13'
+ newvalue: 'Copy target'
+ -
+ value: '14'
+ newvalue: 'Resync target'
+ -
+ value: '15'
+ newvalue: Tuning
+ -
+ value: '16'
+ newvalue: Closing
+ -
+ value: '17'
+ newvalue: Removing
+ -
+ value: '18'
+ newvalue: 'Removing retry'
+ -
+ value: '19'
+ newvalue: Creating
+ -
+ value: '20'
+ newvalue: 'Copy source'
+ -
+ value: '21'
+ newvalue: Importing
+ -
+ value: '22'
+ newvalue: Converting
+ -
+ value: '23'
+ newvalue: Invalid
+ -
+ value: '24'
+ newvalue: Exclusive
+ -
+ value: '25'
+ newvalue: Consistent
+ -
+ value: '26'
+ newvalue: Standby
+ -
+ value: '27'
+ newvalue: 'SD Meta inconsistent'
+ -
+ value: '28'
+ newvalue: 'SD needs fix'
+ -
+ value: '29'
+ newvalue: 'SD meta fix'
+ -
+ value: '999'
+ newvalue: 'Unknown state'
+ -
+ value: '1000'
+ newvalue: 'State not supported by WSAPI'
+ graphs:
+ -
+ uuid: 79e292a64d9247c486812db7a62c0eda
+ name: 'HPE Primera: Capacity'
+ graph_items:
+ -
+ color: 1A7C11
+ item:
+ host: 'HPE Primera by HTTP'
+ key: hpe.primera.system.capacity.allocated
+ -
+ sortorder: '1'
+ color: 2774A4
+ item:
+ host: 'HPE Primera by HTTP'
+ key: hpe.primera.system.capacity.failed
+ -
+ sortorder: '2'
+ color: F63100
+ item:
+ host: 'HPE Primera by HTTP'
+ key: hpe.primera.system.capacity.free
+ -
+ sortorder: '3'
+ color: A54F10
+ item:
+ host: 'HPE Primera by HTTP'
+ key: hpe.primera.system.capacity.total
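For reference, the "Task [{#NAME}]: Finish time" and "Start time" items above convert the WSAPI time string to a unix timestamp with a short JavaScript preprocessing step. A minimal standalone sketch of that conversion, assuming the value arrives as 'YYYY-MM-DD hh:mm:ss ±hh' (the sample value below is hypothetical):

// Illustrative only: mirrors the template's JavaScript preprocessing step.
var value = '2022-06-20 16:35:33 +03';    // hypothetical WSAPI task timestamp
var raw_date = value.split(' ');          // ['2022-06-20', '16:35:33', '+03']
// '2022-06-20' + 'T' + '16:35:33' + '+03' + ':00' -> '2022-06-20T16:35:33+03:00'
var unixtime = Date.parse(raw_date[0] + 'T' + raw_date[1] + raw_date[2] + ':00') / 1000;
// unixtime === 1655732133; the item stores this value with units set to 'unixtime'.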
diff --git a/tests/libs/zbxeval/zbx_eval_parse_expression.yaml b/tests/libs/zbxeval/zbx_eval_parse_expression.yaml
index 00ff02bbbfc..bbdf92e0d8e 100644
--- a/tests/libs/zbxeval/zbx_eval_parse_expression.yaml
+++ b/tests/libs/zbxeval/zbx_eval_parse_expression.yaml
@@ -316,6 +316,23 @@ in:
expression: '{$MACRO}x'
out:
result: FAIL
+---
+test case: Succeed '1-1'
+in:
+ rules: [ZBX_EVAL_PARSE_USERMACRO,ZBX_EVAL_PARSE_MATH,ZBX_EVAL_PARSE_VAR_NUM,ZBX_EVAL_PARSE_COMPOUND_CONST]
+ expression: '1-1'
+out:
+ stack:
+ - type: ZBX_EVAL_TOKEN_VAR_NUM
+ token: '1'
+ opt: 0
+ - type: ZBX_EVAL_TOKEN_VAR_NUM
+ token: '1'
+ opt: 0
+ - type: ZBX_EVAL_TOKEN_OP_SUB
+ token: '-'
+ opt: 0
+ result: SUCCEED
--- # LLD macros
test case: Succeed '{#LLD}'
in:
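The test case added above records the parsed '1-1' expression as a postfix (reverse Polish) token stack: both numeric operands first, then the subtraction operator. Purely for illustration, a sketch of how such a stack would be consumed by an evaluator (not Zabbix's actual implementation):

// Illustrative only: evaluate a postfix token stack like the one in the test case.
var stack = ['1', '1', '-'];              // VAR_NUM, VAR_NUM, OP_SUB
var operands = [];
stack.forEach(function (token) {
    if (token === '-') {
        var b = Number(operands.pop());
        var a = Number(operands.pop());
        operands.push(a - b);             // 1 - 1 = 0
    }
    else {
        operands.push(token);
    }
});
// operands[0] === 0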
diff --git a/ui/app/controllers/CControllerPopupGeneric.php b/ui/app/controllers/CControllerPopupGeneric.php
index 04cc5ed6ee1..09afd19ec0b 100644
--- a/ui/app/controllers/CControllerPopupGeneric.php
+++ b/ui/app/controllers/CControllerPopupGeneric.php
@@ -902,7 +902,7 @@ class CControllerPopupGeneric extends CController {
case 'items':
foreach ($records as $itemid => $row) {
- $records[$row['name']] = ['itemid' => $row['name']] + $row;
+ $records[$row['name']] = ['pattern' => $row['name']] + $row;
unset($records[$itemid]);
}
break;
diff --git a/ui/app/partials/configuration.filter.items.php b/ui/app/partials/configuration.filter.items.php
index f3ce4cbd54c..848abcece3e 100644
--- a/ui/app/partials/configuration.filter.items.php
+++ b/ui/app/partials/configuration.filter.items.php
@@ -52,7 +52,7 @@ zbx_add_post_js("var filterTypeSwitcher".
// First column.
$filter_column_1
- ->addRow((new CLabel(_('Host groups'), 'filter_groupid_ms')),
+ ->addRow((new CLabel(_('Host groups'), 'filter_groupids__ms')),
(new CMultiSelect([
'name' => 'filter_groupids[]',
'object_name' => 'hostGroup',
@@ -69,7 +69,7 @@ $filter_column_1
]
]))->setWidth(ZBX_TEXTAREA_FILTER_SMALL_WIDTH)
)
- ->addRow((new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostid_ms')),
+ ->addRow((new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostids__ms')),
(new CMultiSelect([
'name' => 'filter_hostids[]',
'object_name' => ($data['context'] === 'host') ? 'hosts' : 'templates',
@@ -96,7 +96,7 @@ $filter_column_1
);
if ($data['filter_data']['hosts']) {
- $filter_column_1->addRow(_('Value mapping'),
+ $filter_column_1->addRow(new CLabel(_('Value mapping'), 'filter_valuemapids__ms'),
(new CMultiSelect([
'name' => 'filter_valuemapids[]',
'object_name' => 'valuemap_names',
diff --git a/ui/app/partials/configuration.host.edit.html.php b/ui/app/partials/configuration.host.edit.html.php
index e5b4c511ce6..a1a2f97df3d 100644
--- a/ui/app/partials/configuration.host.edit.html.php
+++ b/ui/app/partials/configuration.host.edit.html.php
@@ -207,7 +207,17 @@ else {
$host_tab
->addItem([
- new CLabel(_('Templates')),
+ new CLabel([
+ _('Templates'),
+ $host_is_discovered
+ ? makeHelpIcon([
+ (new CList([
+ _('Templates linked by host discovery cannot be unlinked.'),
+ _('Use host prototype configuration form to remove automatically linked templates on upcoming discovery.')
+ ]))
+ ])
+ : null
+ ], 'add_templates__ms'),
(new CFormField(
(count($templates_field_items) > 1)
? (new CDiv($templates_field_items))->addClass('linked-templates')
@@ -414,7 +424,7 @@ foreach ($data['inventory_fields'] as $inventory_no => $inventory_field) {
}
$inventory_tab->addItem([
- new CLabel($inventory_field['title']),
+ new CLabel($inventory_field['title'], 'host_inventory['.$field_name.']'),
new CFormField([$input_field, $inventory_item])
]);
}
diff --git a/ui/app/partials/js/configuration.host.edit.html.js.php b/ui/app/partials/js/configuration.host.edit.html.js.php
index c102e0334bb..942ac098292 100644
--- a/ui/app/partials/js/configuration.host.edit.html.js.php
+++ b/ui/app/partials/js/configuration.host.edit.html.js.php
@@ -148,7 +148,7 @@
$groups_ms.on('change', (e) => {
$groups_ms.multiSelect('setDisabledEntries',
- [... document.querySelectorAll('[name^="groups["]')].map((input) => input.value)
+ [... this.form.querySelectorAll('[name^="groups["]')].map((input) => input.value)
);
});
},
diff --git a/ui/app/partials/layout.htmlpage.header.php b/ui/app/partials/layout.htmlpage.header.php
index 621bd06563e..5a343ccc248 100644
--- a/ui/app/partials/layout.htmlpage.header.php
+++ b/ui/app/partials/layout.htmlpage.header.php
@@ -24,7 +24,7 @@
* @var array $data
*/
-global $DB, $ZBX_SERVER, $ZBX_SERVER_NAME, $ZBX_SERVER_PORT;
+global $DB, $ZBX_SERVER_NAME;
$theme = ZBX_DEFAULT_THEME;
$scripts = $data['javascript']['files'];
diff --git a/ui/app/partials/monitoring.latest.filter.php b/ui/app/partials/monitoring.latest.filter.php
index 2afa2fda165..3ecfb6399fd 100644
--- a/ui/app/partials/monitoring.latest.filter.php
+++ b/ui/app/partials/monitoring.latest.filter.php
@@ -29,7 +29,7 @@ $filter_view_data = array_key_exists('filter_view_data', $data) ? $data['filter_
$left_column = (new CFormGrid())
->addClass(CFormGrid::ZBX_STYLE_FORM_GRID_LABEL_WIDTH_TRUE)
->addItem([
- new CLabel(_('Host groups'), 'groupids__ms'),
+ new CLabel(_('Host groups'), 'groupids_#{uniqid}_ms'),
new CFormField(
(new CMultiSelect([
'name' => 'groupids[]',
@@ -53,7 +53,7 @@ $left_column = (new CFormGrid())
)
])
->addItem([
- new CLabel(_('Hosts'), 'hostids__ms'),
+ new CLabel(_('Hosts'), 'hostids_#{uniqid}_ms'),
new CFormField(
(new CMultiSelect([
'name' => 'hostids[]',
@@ -169,7 +169,7 @@ $right_column = (new CFormGrid())
new CFormField($tag_format_line)
])
->addItem([
- new CLabel(_('Tag display priority')),
+ new CLabel(_('Tag display priority'), 'tag_priority_#{uniqid}'),
new CFormField(
(new CTextBox('tag_priority', $data['tag_priority']))
->setWidth(ZBX_TEXTAREA_FILTER_STANDARD_WIDTH)
@@ -179,12 +179,11 @@ $right_column = (new CFormGrid())
)
])
->addItem([
- new CLabel(_('Show details')),
+ new CLabel(_('Show details'), 'show_details'),
new CFormField([
(new CCheckBox('show_details'))
->setChecked($data['show_details'] == 1)
->setUncheckedValue(0)
- ->removeId()
])
]);
diff --git a/ui/app/partials/scheduledreport.formgrid.html.php b/ui/app/partials/scheduledreport.formgrid.html.php
index b4f1576085f..04bb1e18606 100644
--- a/ui/app/partials/scheduledreport.formgrid.html.php
+++ b/ui/app/partials/scheduledreport.formgrid.html.php
@@ -104,7 +104,7 @@ $form_grid
)
])
->addItem([
- new CLabel(_('Start time')),
+ new CLabel(_('Start time'), 'hours'),
new CFormField(
(new CDiv([
(new CNumericBox('hours', $data['hours'], 2))
diff --git a/ui/app/views/administration.token.edit.php b/ui/app/views/administration.token.edit.php
index 5a7605268a9..b7762bd64b9 100644
--- a/ui/app/views/administration.token.edit.php
+++ b/ui/app/views/administration.token.edit.php
@@ -78,7 +78,7 @@ $token_from_list = (new CFormList())
->setChecked($data['expires_state'])
->setUncheckedValue('0')
)
- ->addRow((new CLabel(_('Expires at')))->setAsteriskMark(),
+ ->addRow((new CLabel(_('Expires at'), 'expires_at'))->setAsteriskMark(),
(new CDateSelector('expires_at', $data['expires_at']))
->setDateFormat(ZBX_FULL_DATE_TIME)
->setPlaceholder(_('YYYY-MM-DD hh:mm:ss'))
diff --git a/ui/app/views/administration.user.edit.php b/ui/app/views/administration.user.edit.php
index 8ffffe7203d..1e466b8b4e2 100644
--- a/ui/app/views/administration.user.edit.php
+++ b/ui/app/views/administration.user.edit.php
@@ -409,7 +409,7 @@ if ($data['action'] === 'user.edit') {
);
}
else {
- $permissions_form_list->addRow((new CLabel(_('Role')))->setAsteriskMark(), $role_multiselect);
+ $permissions_form_list->addRow((new CLabel(_('Role'), 'roleid_ms'))->setAsteriskMark(), $role_multiselect);
}
if ($data['roleid']) {
diff --git a/ui/app/views/administration.user.list.php b/ui/app/views/administration.user.list.php
index 4920b58891d..76255e3d866 100644
--- a/ui/app/views/administration.user.list.php
+++ b/ui/app/views/administration.user.list.php
@@ -70,7 +70,7 @@ $widget = (new CWidget())
(new CFormList())->addRow(_('Last name'),
(new CTextBox('filter_surname', $data['filter']['surname']))->setWidth(ZBX_TEXTAREA_FILTER_SMALL_WIDTH)
),
- (new CFormList())->addRow((new CLabel(_('User roles'), 'filter_roles')),
+ (new CFormList())->addRow((new CLabel(_('User roles'), 'filter_roles__ms')),
(new CMultiSelect([
'name' => 'filter_roles[]',
'object_name' => 'roles',
diff --git a/ui/app/views/administration.user.token.edit.php b/ui/app/views/administration.user.token.edit.php
index 85ef8584a9a..51682ff5314 100644
--- a/ui/app/views/administration.user.token.edit.php
+++ b/ui/app/views/administration.user.token.edit.php
@@ -60,12 +60,11 @@ $token_from_list = (new CFormList())
->setChecked($data['expires_state'])
->setUncheckedValue('0')
)
- ->addRow((new CLabel(_('Expires at')))->setAsteriskMark(),
+ ->addRow((new CLabel(_('Expires at'), 'expires_at'))->setAsteriskMark(),
(new CDateSelector('expires_at', $data['expires_at']))
->setDateFormat(ZBX_FULL_DATE_TIME)
->setPlaceholder(_('YYYY-MM-DD hh:mm:ss'))
- ->setAriaRequired(),
- 'expires-at-row'
+ ->setAriaRequired()
)
->addRow(_('Enabled'),
(new CCheckBox('status', ZBX_AUTH_TOKEN_ENABLED))
diff --git a/ui/app/views/monitoring.widget.item.view.php b/ui/app/views/monitoring.widget.item.view.php
index a172ee2fff4..bab325bb799 100644
--- a/ui/app/views/monitoring.widget.item.view.php
+++ b/ui/app/views/monitoring.widget.item.view.php
@@ -168,6 +168,13 @@ function drawValueCell(array $cell_data): array {
$item_content_div->addItem($item_decimals_div);
}
+ // Units AFTER value.
+ if (array_key_exists('units', $cell_data['parts']) && $cell_data['units_pos'] == WIDGET_ITEM_POS_AFTER) {
+ $item_content_div->addItem($units_div);
+ }
+
+ $item_cell[] = $item_content_div;
+
if (array_key_exists('change_indicator', $cell_data['parts'])) {
$change_data = $cell_data['parts']['change_indicator'];
$item_change_div = (new CDiv())->addClass('change-indicator');
@@ -191,13 +198,6 @@ function drawValueCell(array $cell_data): array {
$item_content_div->addItem($item_change_div);
}
- // Units AFTER value.
- if (array_key_exists('units', $cell_data['parts']) && $cell_data['units_pos'] == WIDGET_ITEM_POS_AFTER) {
- $item_content_div->addItem($units_div);
- }
-
- $item_cell[] = $item_content_div;
-
// Units BELOW value.
if (array_key_exists('units', $cell_data['parts']) && $cell_data['units_pos'] == WIDGET_ITEM_POS_BELOW) {
$item_cell[] = $units_div;
diff --git a/ui/app/views/popup.condition.common.php b/ui/app/views/popup.condition.common.php
index 1415ca943af..1366017d2cf 100644
--- a/ui/app/views/popup.condition.common.php
+++ b/ui/app/views/popup.condition.common.php
@@ -69,7 +69,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), [$operator, new CVar('operator', CONDITION_OPERATOR_EQUAL)])
- ->addRow(_('Tag'), $new_condition_tag);
+ ->addRow(new CLabel(_('Tag'), 'tag'), $new_condition_tag);
break;
// New event host group form elements.
@@ -98,7 +98,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Host groups'), $hostgroup_multiselect);
+ ->addRow(new CLabel(_('Host groups'), 'groupids__ms'), $hostgroup_multiselect);
break;
// Event tag pair form elements.
@@ -115,9 +115,9 @@ switch ($data['type']) {
$inline_js .= $new_condition_newtag->getPostJS();
$form_list
- ->addRow(_('Old tag name'), $new_condition_oldtag)
+ ->addRow(new CLabel(_('Old tag name'), 'oldtag'), $new_condition_oldtag)
->addRow(_('Operator'), [$operator, new CVar('operator', CONDITION_OPERATOR_EQUAL)])
- ->addRow(_('New tag name'), $new_condition_newtag);
+ ->addRow(new CLabel(_('New tag name'), 'newtag'), $new_condition_newtag);
break;
// Old|New event tag value form elements.
@@ -135,9 +135,9 @@ switch ($data['type']) {
$inline_js .= $new_condition_value->getPostJS();
$form_list
- ->addRow(_('Tag'), $new_condition_tag)
+ ->addRow(new CLabel(_('Tag'), 'tag'), $new_condition_tag)
->addRow(_('Operator'), $operator)
- ->addRow(_('Value'), $new_condition_value);
+ ->addRow(new CLabel(_('Value'), 'value'), $new_condition_value);
break;
}
break;
@@ -205,7 +205,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Triggers'), $trigger_multiselect);
+ ->addRow(new CLabel(_('Triggers'), 'trigger_new_condition_ms'), $trigger_multiselect);
break;
// Trigger severity form elements.
@@ -248,7 +248,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Hosts'), $host_multiselect);
+ ->addRow(new CLabel(_('Hosts'), 'host_new_condition_ms'), $host_multiselect);
break;
// Host group form elements.
@@ -279,7 +279,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Host groups'), $hostgroup_multiselect);
+ ->addRow(new CLabel(_('Host groups'), 'hostgroup_new_condition_ms'), $hostgroup_multiselect);
break;
// Problem is suppressed form elements.
@@ -304,7 +304,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow((new CLabel(_('Tag')))->setAsteriskMark(), $new_condition_value);
+ ->addRow((new CLabel(_('Tag'), 'value'))->setAsteriskMark(), $new_condition_value);
break;
// Tag value form elements.
@@ -320,9 +320,9 @@ switch ($data['type']) {
$inline_js .= $new_condition_value->getPostJS();
$form_list
- ->addRow((new CLabel(_('Tag')))->setAsteriskMark(), $new_condition_value2)
+ ->addRow((new CLabel(_('Tag'), 'value2'))->setAsteriskMark(), $new_condition_value2)
->addRow(_('Operator'), $operator)
- ->addRow(_('Value'), $new_condition_value);
+ ->addRow(new CLabel(_('Value'), 'value'), $new_condition_value);
break;
// Template form elements.
@@ -354,7 +354,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Templates'), $template_multiselect);
+ ->addRow(new CLabel(_('Templates'), 'template_new_condition_ms'), $template_multiselect);
break;
// Time period form elements.
@@ -465,7 +465,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Discovery rules'), $drule_multiselect);
+ ->addRow(new CLabel(_('Discovery rules'), 'drule_new_condition_ms'), $drule_multiselect);
break;
// Discovery status form elements.
@@ -517,7 +517,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow(_('Proxy'), $proxy_multiselect);
+ ->addRow(new CLabel(_('Proxy'), 'proxy_new_condition_ms'), $proxy_multiselect);
break;
// Received value form elements.
@@ -532,7 +532,7 @@ switch ($data['type']) {
->setFocusableElementId('label-operator')
->addOptions(CSelect::createOptionsFromArray($operators_by_condition[CONDITION_TYPE_DVALUE]))
)
- ->addRow(_('Value'), $new_condition_value);
+ ->addRow(new CLabel(_('Value'), 'value'), $new_condition_value);
break;
// Service port form elements.
@@ -598,7 +598,7 @@ switch ($data['type']) {
$form_list
->addRow(_('Operator'), $operator)
- ->addRow((new CLabel(_('Value')))->setAsteriskMark(), $new_condition_value);
+ ->addRow((new CLabel(_('Value'), 'value'))->setAsteriskMark(), $new_condition_value);
break;
// Event type form elements.
diff --git a/ui/app/views/popup.generic.php b/ui/app/views/popup.generic.php
index 017049ba943..76d2a9533af 100644
--- a/ui/app/views/popup.generic.php
+++ b/ui/app/views/popup.generic.php
@@ -455,14 +455,14 @@ switch ($data['popup_type']) {
$table->addRow([
$data['multiselect']
- ? new CCheckBox('item['.$item[$options['srcfld1']].']', $item['itemid'])
+ ? new CCheckBox('item['.$item['itemid'].']', $item['pattern'])
: null,
(new CLink($item['name'], 'javascript:void(0);'))
->onClick('javascript: addValue('.
json_encode($options['reference']).', '.
- json_encode($item['itemid']).', '.
+ json_encode($item['pattern']).', '.
$options['parentid'].
- ');'.$js_action_onclick),
+ ');'.$js_action_onclick),
(new CDiv($item['key_']))->addClass(ZBX_STYLE_WORDWRAP),
item_type2str($item['type']),
itemValueTypeString($item['value_type']),
@@ -473,7 +473,7 @@ switch ($data['popup_type']) {
]);
$item = [
- 'id' => $item['itemid'],
+ 'id' => $item['pattern'],
'itemid' => $item['itemid'],
'name' => $options['patternselect']
? $item['name']
diff --git a/ui/app/views/popup.import.compare.php b/ui/app/views/popup.import.compare.php
index fc6af676071..afa8d937708 100644
--- a/ui/app/views/popup.import.compare.php
+++ b/ui/app/views/popup.import.compare.php
@@ -170,8 +170,8 @@ else {
'script_inline' => trim($this->readJsFile('popup.import.compare.js.php')),
'body' => !$data['diff']
? (new CTableInfo())
- ->setNoDataMessage(_('No changes.'))
- ->toString()
+ ->setNoDataMessage(_('No changes.'))
+ ->toString()
: (new CForm())
->addClass('import-compare')
->addVar('import_overlayid', $data['import_overlayid'])
diff --git a/ui/app/views/slareport.list.php b/ui/app/views/slareport.list.php
index 48562cba159..0dc67498e31 100644
--- a/ui/app/views/slareport.list.php
+++ b/ui/app/views/slareport.list.php
@@ -37,7 +37,7 @@ $filter = (new CFilter())
(new CFormGrid())
->addClass(CFormGrid::ZBX_STYLE_FORM_GRID_LABEL_WIDTH_TRUE)
->addItem([
- new CLabel(_('SLA'), 'filter_slaid'),
+ new CLabel(_('SLA'), 'filter_slaid_ms'),
new CFormField(
(new CMultiSelect([
'name' => 'filter_slaid',
@@ -57,7 +57,7 @@ $filter = (new CFilter())
]
]))->setWidth(ZBX_TEXTAREA_FILTER_STANDARD_WIDTH)
),
- new CLabel(_('Service'), 'filter_serviceid'),
+ new CLabel(_('Service'), 'filter_serviceid_ms'),
new CFormField(
(new CMultiSelect([
'name' => 'filter_serviceid',
diff --git a/ui/assets/styles/blue-theme.css b/ui/assets/styles/blue-theme.css
index c586efe5a21..219753bdf71 100644
--- a/ui/assets/styles/blue-theme.css
+++ b/ui/assets/styles/blue-theme.css
@@ -2514,7 +2514,7 @@ div.dashboard-grid-widget-tophosts z-bar-gauge {
text-overflow: ellipsis;
white-space: nowrap;
line-height: 14px;
- color: #ffffff;
+ color: #0275b8;
border-bottom: 1px solid #ebeef0;
background: #768d99; }
.toc .toc-arrow {
@@ -5614,19 +5614,23 @@ span.is-loading {
.dashboard-grid-widget-content, div.dashboard-grid-widget-item, .msg-details ul, z-select button.focusable,
.z-select button.focusable, z-select .list,
-.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table {
+.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table, .import-compare .toc,
+.import-compare .diff {
scrollbar-width: thin; }
.dashboard-grid-widget-content::-webkit-scrollbar, div.dashboard-grid-widget-item::-webkit-scrollbar, .msg-details ul::-webkit-scrollbar, z-select button.focusable::-webkit-scrollbar,
.z-select button.focusable::-webkit-scrollbar, z-select .list::-webkit-scrollbar,
- .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar {
+ .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar, .import-compare .toc::-webkit-scrollbar,
+ .import-compare .diff::-webkit-scrollbar {
width: 9px; }
.dashboard-grid-widget-content::-webkit-scrollbar-track, div.dashboard-grid-widget-item::-webkit-scrollbar-track, .msg-details ul::-webkit-scrollbar-track, z-select button.focusable::-webkit-scrollbar-track,
.z-select button.focusable::-webkit-scrollbar-track, z-select .list::-webkit-scrollbar-track,
- .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track {
+ .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track, .import-compare .toc::-webkit-scrollbar-track,
+ .import-compare .diff::-webkit-scrollbar-track {
background-color: rgba(172, 187, 194, 0.55); }
.dashboard-grid-widget-content::-webkit-scrollbar-thumb, div.dashboard-grid-widget-item::-webkit-scrollbar-thumb, .msg-details ul::-webkit-scrollbar-thumb, z-select button.focusable::-webkit-scrollbar-thumb,
.z-select button.focusable::-webkit-scrollbar-thumb, z-select .list::-webkit-scrollbar-thumb,
- .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb {
+ .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb, .import-compare .toc::-webkit-scrollbar-thumb,
+ .import-compare .diff::-webkit-scrollbar-thumb {
background-color: rgba(135, 135, 135, 0.85);
border: 1px solid rgba(122, 122, 122, 0.85); }
@@ -6907,7 +6911,7 @@ z-select.z-select-host-interface li[disabled] .description:not(:empty),
.import-compare {
display: flex;
- max-height: calc(100vh - 190px); }
+ max-height: calc(100vh - 220px); }
.import-compare .toc {
flex: 20%;
overflow-y: auto;
diff --git a/ui/assets/styles/dark-theme.css b/ui/assets/styles/dark-theme.css
index 7b0534eff7c..8ea81b93bdd 100644
--- a/ui/assets/styles/dark-theme.css
+++ b/ui/assets/styles/dark-theme.css
@@ -2527,7 +2527,7 @@ div.dashboard-grid-widget-tophosts z-bar-gauge {
text-overflow: ellipsis;
white-space: nowrap;
line-height: 14px;
- color: #f2f2f2;
+ color: #4796c4;
border-bottom: 1px solid #383838;
background: #4f4f4f; }
.toc .toc-arrow {
@@ -5625,19 +5625,23 @@ span.is-loading {
.dashboard-grid-widget-content, div.dashboard-grid-widget-item, .msg-details ul, z-select button.focusable,
.z-select button.focusable, z-select .list,
-.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table {
+.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table, .import-compare .toc,
+.import-compare .diff {
scrollbar-width: thin; }
.dashboard-grid-widget-content::-webkit-scrollbar, div.dashboard-grid-widget-item::-webkit-scrollbar, .msg-details ul::-webkit-scrollbar, z-select button.focusable::-webkit-scrollbar,
.z-select button.focusable::-webkit-scrollbar, z-select .list::-webkit-scrollbar,
- .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar {
+ .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar, .import-compare .toc::-webkit-scrollbar,
+ .import-compare .diff::-webkit-scrollbar {
width: 9px; }
.dashboard-grid-widget-content::-webkit-scrollbar-track, div.dashboard-grid-widget-item::-webkit-scrollbar-track, .msg-details ul::-webkit-scrollbar-track, z-select button.focusable::-webkit-scrollbar-track,
.z-select button.focusable::-webkit-scrollbar-track, z-select .list::-webkit-scrollbar-track,
- .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track {
+ .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track, .import-compare .toc::-webkit-scrollbar-track,
+ .import-compare .diff::-webkit-scrollbar-track {
background-color: #1f1f1f; }
.dashboard-grid-widget-content::-webkit-scrollbar-thumb, div.dashboard-grid-widget-item::-webkit-scrollbar-thumb, .msg-details ul::-webkit-scrollbar-thumb, z-select button.focusable::-webkit-scrollbar-thumb,
.z-select button.focusable::-webkit-scrollbar-thumb, z-select .list::-webkit-scrollbar-thumb,
- .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb {
+ .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb, .import-compare .toc::-webkit-scrollbar-thumb,
+ .import-compare .diff::-webkit-scrollbar-thumb {
background-color: #383838;
border: 1px solid #2b2b2b; }
@@ -6918,7 +6922,7 @@ z-select.z-select-host-interface li[disabled] .description:not(:empty),
.import-compare {
display: flex;
- max-height: calc(100vh - 190px); }
+ max-height: calc(100vh - 220px); }
.import-compare .toc {
flex: 20%;
overflow-y: auto;
diff --git a/ui/assets/styles/hc-dark.css b/ui/assets/styles/hc-dark.css
index af32c1d2bf9..152425834a5 100644
--- a/ui/assets/styles/hc-dark.css
+++ b/ui/assets/styles/hc-dark.css
@@ -2503,7 +2503,7 @@ div.dashboard-grid-widget-tophosts z-bar-gauge {
text-overflow: ellipsis;
white-space: nowrap;
line-height: 14px;
- color: #000000;
+ color: #f8f8f8;
border-bottom: 1px solid #333333;
background: #dddddd; }
.toc .toc-arrow {
@@ -5571,19 +5571,23 @@ span.is-loading {
.dashboard-grid-widget-content, div.dashboard-grid-widget-item, .msg-details ul, z-select button.focusable,
.z-select button.focusable, z-select .list,
-.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table {
+.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table, .import-compare .toc,
+.import-compare .diff {
scrollbar-width: thin; }
.dashboard-grid-widget-content::-webkit-scrollbar, div.dashboard-grid-widget-item::-webkit-scrollbar, .msg-details ul::-webkit-scrollbar, z-select button.focusable::-webkit-scrollbar,
.z-select button.focusable::-webkit-scrollbar, z-select .list::-webkit-scrollbar,
- .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar {
+ .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar, .import-compare .toc::-webkit-scrollbar,
+ .import-compare .diff::-webkit-scrollbar {
width: 9px; }
.dashboard-grid-widget-content::-webkit-scrollbar-track, div.dashboard-grid-widget-item::-webkit-scrollbar-track, .msg-details ul::-webkit-scrollbar-track, z-select button.focusable::-webkit-scrollbar-track,
.z-select button.focusable::-webkit-scrollbar-track, z-select .list::-webkit-scrollbar-track,
- .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track {
+ .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track, .import-compare .toc::-webkit-scrollbar-track,
+ .import-compare .diff::-webkit-scrollbar-track {
background-color: #1f1f1f; }
.dashboard-grid-widget-content::-webkit-scrollbar-thumb, div.dashboard-grid-widget-item::-webkit-scrollbar-thumb, .msg-details ul::-webkit-scrollbar-thumb, z-select button.focusable::-webkit-scrollbar-thumb,
.z-select button.focusable::-webkit-scrollbar-thumb, z-select .list::-webkit-scrollbar-thumb,
- .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb {
+ .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb, .import-compare .toc::-webkit-scrollbar-thumb,
+ .import-compare .diff::-webkit-scrollbar-thumb {
background-color: #999999;
border: 1px solid #4d4d4d; }
@@ -6864,7 +6868,7 @@ z-select.z-select-host-interface li[disabled] .description:not(:empty),
.import-compare {
display: flex;
- max-height: calc(100vh - 190px); }
+ max-height: calc(100vh - 220px); }
.import-compare .toc {
flex: 20%;
overflow-y: auto;
diff --git a/ui/assets/styles/hc-light.css b/ui/assets/styles/hc-light.css
index bbadfa23e01..7222a2654c9 100644
--- a/ui/assets/styles/hc-light.css
+++ b/ui/assets/styles/hc-light.css
@@ -2503,7 +2503,7 @@ div.dashboard-grid-widget-tophosts z-bar-gauge {
text-overflow: ellipsis;
white-space: nowrap;
line-height: 14px;
- color: #ffffff;
+ color: #555555;
border-bottom: 1px solid #888888;
background: #333333; }
.toc .toc-arrow {
@@ -5571,19 +5571,23 @@ span.is-loading {
.dashboard-grid-widget-content, div.dashboard-grid-widget-item, .msg-details ul, z-select button.focusable,
.z-select button.focusable, z-select .list,
-.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table {
+.z-select .list, .multiselect-available, textarea, select, .setup-right-body, .overlay-dialogue.modal .overlay-dialogue-body, .overlay-dialogue .hintbox-wrap, .overlay-dialogue .maps-container, .notif-body, .debug-output, .overlay-descr, .overflow-table, .import-compare .toc,
+.import-compare .diff {
scrollbar-width: thin; }
.dashboard-grid-widget-content::-webkit-scrollbar, div.dashboard-grid-widget-item::-webkit-scrollbar, .msg-details ul::-webkit-scrollbar, z-select button.focusable::-webkit-scrollbar,
.z-select button.focusable::-webkit-scrollbar, z-select .list::-webkit-scrollbar,
- .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar {
+ .z-select .list::-webkit-scrollbar, .multiselect-available::-webkit-scrollbar, textarea::-webkit-scrollbar, select::-webkit-scrollbar, .setup-right-body::-webkit-scrollbar, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar, .overlay-dialogue .maps-container::-webkit-scrollbar, .notif-body::-webkit-scrollbar, .debug-output::-webkit-scrollbar, .overlay-descr::-webkit-scrollbar, .overflow-table::-webkit-scrollbar, .import-compare .toc::-webkit-scrollbar,
+ .import-compare .diff::-webkit-scrollbar {
width: 9px; }
.dashboard-grid-widget-content::-webkit-scrollbar-track, div.dashboard-grid-widget-item::-webkit-scrollbar-track, .msg-details ul::-webkit-scrollbar-track, z-select button.focusable::-webkit-scrollbar-track,
.z-select button.focusable::-webkit-scrollbar-track, z-select .list::-webkit-scrollbar-track,
- .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track {
+ .z-select .list::-webkit-scrollbar-track, .multiselect-available::-webkit-scrollbar-track, textarea::-webkit-scrollbar-track, select::-webkit-scrollbar-track, .setup-right-body::-webkit-scrollbar-track, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-track, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-track, .overlay-dialogue .maps-container::-webkit-scrollbar-track, .notif-body::-webkit-scrollbar-track, .debug-output::-webkit-scrollbar-track, .overlay-descr::-webkit-scrollbar-track, .overflow-table::-webkit-scrollbar-track, .import-compare .toc::-webkit-scrollbar-track,
+ .import-compare .diff::-webkit-scrollbar-track {
background-color: #999999; }
.dashboard-grid-widget-content::-webkit-scrollbar-thumb, div.dashboard-grid-widget-item::-webkit-scrollbar-thumb, .msg-details ul::-webkit-scrollbar-thumb, z-select button.focusable::-webkit-scrollbar-thumb,
.z-select button.focusable::-webkit-scrollbar-thumb, z-select .list::-webkit-scrollbar-thumb,
- .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb {
+ .z-select .list::-webkit-scrollbar-thumb, .multiselect-available::-webkit-scrollbar-thumb, textarea::-webkit-scrollbar-thumb, select::-webkit-scrollbar-thumb, .setup-right-body::-webkit-scrollbar-thumb, .overlay-dialogue.modal .overlay-dialogue-body::-webkit-scrollbar-thumb, .overlay-dialogue .hintbox-wrap::-webkit-scrollbar-thumb, .overlay-dialogue .maps-container::-webkit-scrollbar-thumb, .notif-body::-webkit-scrollbar-thumb, .debug-output::-webkit-scrollbar-thumb, .overlay-descr::-webkit-scrollbar-thumb, .overflow-table::-webkit-scrollbar-thumb, .import-compare .toc::-webkit-scrollbar-thumb,
+ .import-compare .diff::-webkit-scrollbar-thumb {
background-color: #f9f9f9;
border: 1px solid #adadad; }
@@ -6864,7 +6868,7 @@ z-select.z-select-host-interface li[disabled] .description:not(:empty),
.import-compare {
display: flex;
- max-height: calc(100vh - 190px); }
+ max-height: calc(100vh - 220px); }
.import-compare .toc {
flex: 20%;
overflow-y: auto;
diff --git a/ui/chart4.php b/ui/chart4.php
index f20c2367167..3c4126d029f 100644
--- a/ui/chart4.php
+++ b/ui/chart4.php
@@ -138,9 +138,9 @@ $maxX = 900;
$minX = 0;
for ($i = 1; $i <= $weeks; $i++) {
- $x1 = (900 / 52) * $sizeX * ($i - 1 - $minX) / ($maxX - $minX);
+ $x1 = (int) ((900 / 52) * $sizeX * ($i - 1 - $minX) / ($maxX - $minX));
- $yt = $sizeY * $true[$i - 1] / 100;
+ $yt = (int) ($sizeY * $true[$i - 1] / 100);
if ($yt > 0) {
imagefilledrectangle($im, $x1 + $shiftX, $shiftYup, $x1 + $shiftX + 8, $yt + $shiftYup, imagecolorallocate($im, 235, 120, 120)); // red
}
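
The chart4.php change above casts the computed coordinates to int before they reach imagefilledrectangle(): since PHP 8.1, implicitly converting a float with a fractional part into an int GD parameter raises a deprecation notice, so explicit casts (or round()) keep the image functions quiet. A minimal sketch of the pattern, with invented numbers:

<?php
// Illustration only: GD coordinate parameters are ints; cast explicitly instead
// of relying on implicit float-to-int conversion (deprecated in PHP 8.1 when the
// float has a fractional part).
$im = imagecreatetruecolor(100, 100);

$x1 = (int) (900 / 52 * 3.7);	// explicit truncating cast
$y1 = (int) round(42.6);	// round() first when nearest-int behaviour is wanted

imagefilledrectangle($im, $x1, $y1, $x1 + 8, $y1 + 8, imagecolorallocate($im, 235, 120, 120));
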
diff --git a/ui/hostinventoriesoverview.php b/ui/hostinventoriesoverview.php
index 661d35c27c5..7ebfa6aef3a 100644
--- a/ui/hostinventoriesoverview.php
+++ b/ui/hostinventoriesoverview.php
@@ -69,10 +69,6 @@ $filter = [
$ms_groups = [];
$filter_groupids = $filter['groups'] ? getSubGroups($filter['groups'], $ms_groups) : null;
-if (count($ms_groups) != count($filter['groups'])) {
- show_error_message(_('No permissions to referred object or it does not exist!'));
-}
-
$inventories = [];
foreach (getHostInventories() as $inventory) {
$inventories[$inventory['db_field']] = $inventory['title'];
diff --git a/ui/include/classes/core/CConfigFile.php b/ui/include/classes/core/CConfigFile.php
index b29a3813d7a..2997ac348b2 100644
--- a/ui/include/classes/core/CConfigFile.php
+++ b/ui/include/classes/core/CConfigFile.php
@@ -154,11 +154,11 @@ class CConfigFile {
$this->config['DB']['VAULT_TOKEN'] = $DB['VAULT_TOKEN'];
}
- if (isset($ZBX_SERVER)) {
+ if (isset($ZBX_SERVER) && $ZBX_SERVER !== '') {
$this->config['ZBX_SERVER'] = $ZBX_SERVER;
}
- if (isset($ZBX_SERVER_PORT)) {
+ if (isset($ZBX_SERVER_PORT) && $ZBX_SERVER_PORT !== '') {
$this->config['ZBX_SERVER_PORT'] = $ZBX_SERVER_PORT;
}
diff --git a/ui/include/classes/core/ZBase.php b/ui/include/classes/core/ZBase.php
index 57aecf30828..6c3afe84c07 100644
--- a/ui/include/classes/core/ZBase.php
+++ b/ui/include/classes/core/ZBase.php
@@ -490,7 +490,11 @@ class ZBase {
$action_class = $router->getController();
try {
- if (!class_exists($action_class, true)) {
+ if ($action_class === null) {
+ throw new Exception(_('Class not found.'));
+ }
+
+ if (!class_exists($action_class)) {
throw new Exception(_s('Class %1$s not found for action %2$s.', $action_class, $action_name));
}
diff --git a/ui/include/classes/data/CItemData.php b/ui/include/classes/data/CItemData.php
index 67e1321249a..398c9948873 100644
--- a/ui/include/classes/data/CItemData.php
+++ b/ui/include/classes/data/CItemData.php
@@ -1665,7 +1665,7 @@ final class CItemData {
'value_type' => ITEM_VALUE_TYPE_UINT64
],
'zabbix[queue,<from>,<to>]' => [
- 'description' => _('Number of items in the queue which are delayed by from to to seconds, inclusive.'),
+ 'description' => _('Number of items in the queue which are delayed by from to seconds, inclusive.'),
'value_type' => ITEM_VALUE_TYPE_UINT64
],
'zabbix[rcache,<cache>,<mode>]' => [
diff --git a/ui/include/classes/db/OracleDbBackend.php b/ui/include/classes/db/OracleDbBackend.php
index 291e85d6737..bffdba41387 100644
--- a/ui/include/classes/db/OracleDbBackend.php
+++ b/ui/include/classes/db/OracleDbBackend.php
@@ -30,7 +30,7 @@ class OracleDbBackend extends DbBackend {
* @return boolean
*/
protected function checkDbVersionTable() {
- $table_exists = DBfetch(DBselect("SELECT table_name FROM user_tables WHERE table_name='DBVERSION'"));
+ $table_exists = DBfetch(DBselect("SELECT table_name FROM all_tables WHERE table_name='DBVERSION'"));
if (!$table_exists) {
$this->setError(_s('Unable to determine current Zabbix database version: %1$s.',
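
Switching the DBVERSION lookup from user_tables to all_tables widens the check: user_tables lists only tables owned by the connected Oracle user, while all_tables also covers tables owned by other schemas that the user can access. A short sketch of the two dictionary views, for reference only:

<?php
// Illustration only: Oracle data dictionary views differ in scope.
$owned_only = "SELECT table_name FROM user_tables WHERE table_name='DBVERSION'";	// owner = current user
$accessible = "SELECT table_name FROM all_tables WHERE table_name='DBVERSION'";	// any accessible owner
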
diff --git a/ui/include/classes/graphdraw/CGraphDraw.php b/ui/include/classes/graphdraw/CGraphDraw.php
index ef42dff9f57..52107270fdd 100644
--- a/ui/include/classes/graphdraw/CGraphDraw.php
+++ b/ui/include/classes/graphdraw/CGraphDraw.php
@@ -124,13 +124,9 @@ abstract class CGraphDraw {
// i should rename no alpha to alpha at some point to get rid of some confusion
foreach ($this->colorsrgb as $name => $RGBA) {
- if (isset($RGBA[3]) && function_exists('imagecolorexactalpha')
- && function_exists('imagecreatetruecolor') && @imagecreatetruecolor(1, 1)) {
- $this->colors[$name] = imagecolorexactalpha($this->im, $RGBA[0], $RGBA[1], $RGBA[2], $RGBA[3]);
- }
- else {
- $this->colors[$name] = imagecolorallocate($this->im, $RGBA[0], $RGBA[1], $RGBA[2]);
- }
+ $this->colors[$name] = array_key_exists(3, $RGBA)
+ ? imagecolorexactalpha($this->im, $RGBA[0], $RGBA[1], $RGBA[2], $RGBA[3])
+ : imagecolorallocate($this->im, $RGBA[0], $RGBA[1], $RGBA[2]);
}
}
@@ -259,7 +255,7 @@ abstract class CGraphDraw {
return get_color($this->im, $color, $alfa);
}
- public function getShadow($color, $alfa = 0) {
+ public function getShadow($color, $alpha = 0) {
if (isset($this->colorsrgb[$color])) {
$red = $this->colorsrgb[$color][0];
$green = $this->colorsrgb[$color][1];
@@ -270,16 +266,15 @@ abstract class CGraphDraw {
}
if ($this->sum > 0) {
- $red = (int)($red * 0.6);
- $green = (int)($green * 0.6);
- $blue = (int)($blue * 0.6);
+ $red = (int) ($red * 0.6);
+ $green = (int) ($green * 0.6);
+ $blue = (int) ($blue * 0.6);
}
$RGB = [$red, $green, $blue];
- if (isset($alfa) && function_exists('imagecolorexactalpha') && function_exists('imagecreatetruecolor')
- && @imagecreatetruecolor(1, 1)) {
- return imagecolorexactalpha($this->im, $RGB[0], $RGB[1], $RGB[2], $alfa);
+ if ($alpha != 0) {
+ return imagecolorexactalpha($this->im, $RGB[0], $RGB[1], $RGB[2], $alpha);
}
return imagecolorallocate($this->im, $RGB[0], $RGB[1], $RGB[2]);
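
The CGraphDraw cleanup above drops the function_exists()/imagecreatetruecolor() probes, so only the alpha value now decides which GD allocator is used; both functions have been available in GD for a long time. For reference, GD's alpha component runs from 0 (opaque) to 127 (fully transparent), as in this small sketch with invented colours:

<?php
// Illustration only: GD alpha ranges from 0 (opaque) to 127 (fully transparent).
$im = imagecreatetruecolor(10, 10);

$opaque = imagecolorallocate($im, 255, 0, 0);
$translucent = imagecolorexactalpha($im, 255, 0, 0, 64);	// roughly half transparent
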
diff --git a/ui/include/classes/graphdraw/CLineGraphDraw.php b/ui/include/classes/graphdraw/CLineGraphDraw.php
index 4b9df8f15b5..a4988b1e782 100644
--- a/ui/include/classes/graphdraw/CLineGraphDraw.php
+++ b/ui/include/classes/graphdraw/CLineGraphDraw.php
@@ -722,16 +722,17 @@ class CLineGraphDraw extends CGraphDraw {
$gbColor
);
- imagefilledpolygon(
- $this->im,
- [
- $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
- $this->shiftXleft + $this->shiftXCaption + 3, $this->shiftY - 5,
- $this->shiftXleft + $this->shiftXCaption, $this->shiftY - 10
- ],
- 3,
- $this->getColor('White')
- );
+ $points = [
+ $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
+ $this->shiftXleft + $this->shiftXCaption + 3, $this->shiftY - 5,
+ $this->shiftXleft + $this->shiftXCaption, $this->shiftY - 10
+ ];
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $points, $this->getColor('White'));
+ }
+ else {
+ imagefilledpolygon($this->im, $points, 3, $this->getColor('White'));
+ }
/* draw left axis triangle */
zbx_imageline($this->im, $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
@@ -765,16 +766,17 @@ class CLineGraphDraw extends CGraphDraw {
$gbColor
);
- imagefilledpolygon(
- $this->im,
- [
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 3, $this->shiftY - 5,
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption, $this->shiftY - 10
- ],
- 3,
- $this->getColor('White')
- );
+ $points = [
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 3, $this->shiftY - 5,
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption, $this->shiftY - 10
+ ];
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $points, $this->getColor('White'));
+ }
+ else {
+ imagefilledpolygon($this->im, $points, 3, $this->getColor('White'));
+ }
/* draw right axis triangle */
zbx_imageline($this->im, $this->sizeX + $this->shiftXleft + $this->shiftXCaption - 3, $this->shiftY - 5,
@@ -807,16 +809,17 @@ class CLineGraphDraw extends CGraphDraw {
$gbColor
);
- imagefilledpolygon(
- $this->im,
- [
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 5, $this->sizeY + $this->shiftY - 2,
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 5, $this->sizeY + $this->shiftY + 4,
- $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 10, $this->sizeY + $this->shiftY + 1
- ],
- 3,
- $this->getColor('White')
- );
+ $points = [
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 5, $this->sizeY + $this->shiftY - 2,
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 5, $this->sizeY + $this->shiftY + 4,
+ $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 10, $this->sizeY + $this->shiftY + 1
+ ];
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $points, $this->getColor('White'));
+ }
+ else {
+ imagefilledpolygon($this->im, $points, 3, $this->getColor('White'));
+ }
/* draw X axis triangle */
zbx_imageline($this->im, $this->sizeX + $this->shiftXleft + $this->shiftXCaption + 5, $this->sizeY + $this->shiftY - 2,
@@ -1392,13 +1395,7 @@ class CLineGraphDraw extends CGraphDraw {
}
// draw color square
- if (function_exists('imagecolorexactalpha') && function_exists('imagecreatetruecolor') && @imagecreatetruecolor(1, 1)) {
- $colorSquare = imagecreatetruecolor(11, 11);
- }
- else {
- $colorSquare = imagecreate(11, 11);
- }
-
+ $colorSquare = imagecreatetruecolor(11, 11);
imagefill($colorSquare, 0, 0, $this->getColor($this->graphtheme['backgroundcolor'], 0));
imagefilledrectangle($colorSquare, 0, 0, 10, 10, $color);
imagerectangle($colorSquare, 0, 0, 10, 10, $this->getColor('Black'));
@@ -1512,27 +1509,30 @@ class CLineGraphDraw extends CGraphDraw {
? $this->graphtheme['leftpercentilecolor']
: $this->graphtheme['rightpercentilecolor'];
- imagefilledpolygon(
- $this->im,
- [
- $leftXShift + 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
- $leftXShift - 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
- $leftXShift, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y - 10
- ],
- 3,
- $this->getColor($color)
- );
+ $points = [
+ $leftXShift + 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
+ $leftXShift - 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
+ $leftXShift, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y - 10
+ ];
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $points, $this->getColor($color));
+ }
+ else {
+ imagefilledpolygon($this->im, $points, 3, $this->getColor($color));
+ }
+
+ $points = [
+ $leftXShift + 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
+ $leftXShift - 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
+ $leftXShift, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y - 10
+ ];
+ if (PHP_VERSION_ID >= 80100) {
+ imagepolygon($this->im, $points, $this->getColor('Black No Alpha'));
+ }
+ else {
+ imagepolygon($this->im, $points, 3, $this->getColor('Black No Alpha'));
+ }
- imagepolygon(
- $this->im,
- [
- $leftXShift + 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
- $leftXShift - 5, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y,
- $leftXShift, $this->sizeY + $this->shiftY + 14 * $rowNum + self::LEGEND_OFFSET_Y - 10
- ],
- 3,
- $this->getColor('Black No Alpha')
- );
$rowNum++;
}
}
@@ -1647,14 +1647,14 @@ class CLineGraphDraw extends CGraphDraw {
$x1 = $from + $this->shiftXleft - 1;
$x2 = $to + $this->shiftXleft;
- $y1min = $zero - ($min_from - $oxy) / $unit2px;
- $y2min = $zero - ($min_to - $oxy) / $unit2px;
+ $y1min = (int) round($zero - ($min_from - $oxy) / $unit2px);
+ $y2min = (int) round($zero - ($min_to - $oxy) / $unit2px);
- $y1max = $zero - ($max_from - $oxy) / $unit2px;
- $y2max = $zero - ($max_to - $oxy) / $unit2px;
+ $y1max = (int) round($zero - ($max_from - $oxy) / $unit2px);
+ $y2max = (int) round($zero - ($max_to - $oxy) / $unit2px);
- $y1avg = $zero - ($avg_from - $oxy) / $unit2px;
- $y2avg = $zero - ($avg_to - $oxy) / $unit2px;
+ $y1avg = (int) round($zero - ($avg_from - $oxy) / $unit2px);
+ $y2avg = (int) round($zero - ($avg_to - $oxy) / $unit2px);
switch ($calc_fnc) {
case CALC_FNC_MAX:
@@ -1734,7 +1734,13 @@ class CLineGraphDraw extends CGraphDraw {
$style = $drawtype == GRAPH_ITEM_DRAWTYPE_BOLD_LINE ? LINE_TYPE_BOLD : LINE_TYPE_NORMAL;
if ($calc_fnc == CALC_FNC_ALL) {
- imagefilledpolygon($this->im, $a, 4, $minmax_color);
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $a, $avg_color);
+ }
+ else {
+ imagefilledpolygon($this->im, $a, 4, $minmax_color);
+ }
+
if (!$y1x || !$y2x) {
zbx_imagealine($this->im, $x1, $y1max, $x2, $y2max, $max_color, $style);
}
@@ -1755,15 +1761,8 @@ class CLineGraphDraw extends CGraphDraw {
break;
case GRAPH_ITEM_DRAWTYPE_DASHED_LINE:
- if (function_exists('imagesetstyle')) {
- // use imagesetstyle+imageline instead of bugged imagedashedline
- $style = [$avg_color, $avg_color, IMG_COLOR_TRANSPARENT, IMG_COLOR_TRANSPARENT];
- imagesetstyle($this->im, $style);
- zbx_imageline($this->im, $x1, $y1, $x2, $y2, IMG_COLOR_STYLED);
- }
- else {
- imagedashedline($this->im, $x1, $y1, $x2, $y2, $avg_color);
- }
+ imagesetstyle($this->im, [$avg_color, $avg_color, IMG_COLOR_TRANSPARENT, IMG_COLOR_TRANSPARENT]);
+ zbx_imageline($this->im, $x1, $y1, $x2, $y2, IMG_COLOR_STYLED);
break;
case GRAPH_ITEM_DRAWTYPE_GRADIENT_LINE:
@@ -1784,7 +1783,12 @@ class CLineGraphDraw extends CGraphDraw {
$a[6] = $x2;
$a[7] = $y2;
- imagefilledpolygon($this->im, $a, 4, $avg_color);
+ if (PHP_VERSION_ID >= 80100) {
+ imagefilledpolygon($this->im, $a, $avg_color);
+ }
+ else {
+ imagefilledpolygon($this->im, $a, 4, $avg_color);
+ }
}
else {
imageLine($this->im, $x1, $y1, $x2, $y2, $avg_color); // draw the initial line
@@ -1817,15 +1821,12 @@ class CLineGraphDraw extends CGraphDraw {
$steps = $this->sizeY + $this->shiftY - $gy + 1;
for ($j = 0; $j < $steps; $j++) {
- if (($gy + $j) < ($this->shiftY + $startAlpha)) {
- $alpha = 0;
- }
- else {
- $alpha = 127 - abs(127 - ($alphaRatio * ($gy + $j - $this->shiftY - $startAlpha)));
- }
+ $alpha = ($gy + $j) < ($this->shiftY + $startAlpha)
+ ? 0
+ : 127 - (int) abs(127 - ($alphaRatio * ($gy + $j - $this->shiftY - $startAlpha)));
$color = imagecolorexactalpha($this->im, $red, $green, $blue, $alpha);
- imagesetpixel($this->im, $x2 + $i, $gy + $j, $color);
+ imagesetpixel($this->im, $x2 + $i, (int) $gy + $j, $color);
}
}
}
@@ -2040,14 +2041,7 @@ class CLineGraphDraw extends CGraphDraw {
$this->selectTriggers();
$this->calcDimensions();
- if (function_exists('imagecolorexactalpha') && function_exists('imagecreatetruecolor')
- && @imagecreatetruecolor(1, 1)
- ) {
- $this->im = imagecreatetruecolor(1, 1);
- }
- else {
- $this->im = imagecreate(1, 1);
- }
+ $this->im = imagecreatetruecolor(1, 1);
$this->initColors();
@@ -2077,13 +2071,7 @@ class CLineGraphDraw extends CGraphDraw {
$this->calcPercentile();
$this->calcZero();
- if (function_exists('imagecolorexactalpha') && function_exists('imagecreatetruecolor')
- && @imagecreatetruecolor(1, 1)) {
- $this->im = imagecreatetruecolor($this->fullSizeX, $this->fullSizeY);
- }
- else {
- $this->im = imagecreate($this->fullSizeX, $this->fullSizeY);
- }
+ $this->im = imagecreatetruecolor($this->fullSizeX, $this->fullSizeY);
$this->initColors();
$this->drawRectangle();
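
The repeated PHP_VERSION_ID >= 80100 branches in this file exist because PHP 8.1 introduced imagefilledpolygon()/imagepolygon() signatures without the $num_points argument and deprecated the old ones, while older PHP still requires the count. A small wrapper is one way such branching can be factored out; this helper is only a sketch and is not part of the patch:

<?php
// Illustration only: pick the imagefilledpolygon() signature for the running PHP.
// No parameter type on $im so the helper also runs on PHP < 8.0, where images
// are resources rather than GdImage objects.
function image_filled_polygon($im, array $points, int $color): void {
	if (PHP_VERSION_ID >= 80100) {
		imagefilledpolygon($im, $points, $color);	// $num_points is deprecated on 8.1+
	}
	else {
		imagefilledpolygon($im, $points, intdiv(count($points), 2), $color);
	}
}
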
diff --git a/ui/include/classes/graphdraw/CPieGraphDraw.php b/ui/include/classes/graphdraw/CPieGraphDraw.php
index ab7557d9bf1..c69ad86fc4c 100644
--- a/ui/include/classes/graphdraw/CPieGraphDraw.php
+++ b/ui/include/classes/graphdraw/CPieGraphDraw.php
@@ -86,22 +86,24 @@ class CPieGraphDraw extends CGraphDraw {
$count *= $this->exploderad;
$anglemid = (int) (($anglestart + $angleend) / 2);
- $y+= round($count * sin(deg2rad($anglemid)));
- $x+= round($count * cos(deg2rad($anglemid)));
+ $y += round($count * sin(deg2rad($anglemid)));
+ $x += round($count * cos(deg2rad($anglemid)));
- return [$x, $y];
+ return [(int) $x, (int) $y];
}
protected function calcExplodedRadius($sizeX, $sizeY, $count) {
$count *= $this->exploderad * 2;
$sizeX -= $count;
$sizeY -= $count;
- return [$sizeX, $sizeY];
+
+ return [(int) $sizeX, (int) $sizeY];
}
protected function calc3DAngle($sizeX, $sizeY) {
$sizeY *= GRAPH_3D_ANGLE / 90;
- return [$sizeX, round($sizeY)];
+
+ return [$sizeX, (int) round($sizeY)];
}
protected function selectData() {
@@ -432,8 +434,8 @@ class CPieGraphDraw extends CGraphDraw {
list($sizeX, $sizeY) = $this->calcExplodedRadius($sizeX, $sizeY, count($values));
}
- $xc = $x = (int) $this->sizeX / 2 + $this->shiftXleft;
- $yc = $y = (int) $this->sizeY / 2 + $this->shiftY;
+ $xc = $x = (int) ($this->sizeX / 2) + $this->shiftXleft;
+ $yc = $y = (int) ($this->sizeY / 2) + $this->shiftY;
$anglestart = 0;
$angleend = 0;
@@ -513,8 +515,8 @@ class CPieGraphDraw extends CGraphDraw {
list($sizeX, $sizeY) = $this->calc3DAngle($sizeX, $sizeY);
- $xc = $x = (int) $this->sizeX / 2 + $this->shiftXleft;
- $yc = $y = (int) $this->sizeY / 2 + $this->shiftY;
+ $xc = $x = (int) ($this->sizeX / 2) + $this->shiftXleft;
+ $yc = $y = (int) ($this->sizeY / 2) + $this->shiftY;
// bottom angle line
$anglestart = 0;
@@ -701,12 +703,8 @@ class CPieGraphDraw extends CGraphDraw {
$this->exploderad = (int) $this->sizeX / 100;
$this->exploderad3d = (int) $this->sizeX / 60;
- if (function_exists('ImageColorExactAlpha') && function_exists('ImageCreateTrueColor') && @imagecreatetruecolor(1, 1)) {
- $this->im = imagecreatetruecolor($this->fullSizeX, $this->fullSizeY);
- }
- else {
- $this->im = imagecreate($this->fullSizeX, $this->fullSizeY);
- }
+ $this->im = imagecreatetruecolor($this->fullSizeX, $this->fullSizeY);
+
$this->initColors();
$this->drawRectangle();
$this->drawHeader();
diff --git a/ui/include/classes/helpers/CArrayHelper.php b/ui/include/classes/helpers/CArrayHelper.php
index 0815349e6ae..6d0c440eb1f 100644
--- a/ui/include/classes/helpers/CArrayHelper.php
+++ b/ui/include/classes/helpers/CArrayHelper.php
@@ -208,6 +208,16 @@ class CArrayHelper {
}
/**
+ * Sort array by keys in ascending order.
+ * Performs case-insensitive string comparisons using a "natural order" algorithm.
+ *
+ * @param array $array
+ */
+ public static function ksort(array &$array): void {
+ uksort($array, 'strnatcasecmp');
+ }
+
+ /**
* Unset values that are contained in $a2 from $a1. Skip arrays and keys given in $skipKeys.
*
* @param array $a1 array to modify
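
The new CArrayHelper::ksort() above orders keys with strnatcasecmp(), i.e. case-insensitively and in "natural" order ('item2' before 'item10'), which PHP's built-in ksort() with default flags would not do. A tiny usage sketch with invented keys:

<?php
// Illustration only: natural, case-insensitive key ordering via uksort().
$array = ['Item10' => 1, 'item2' => 2, 'ITEM1' => 3];

uksort($array, 'strnatcasecmp');

print_r(array_keys($array));	// ITEM1, item2, Item10
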
diff --git a/ui/include/classes/ldap/CLdap.php b/ui/include/classes/ldap/CLdap.php
index 5161ada9b9f..17e87f2ffb4 100644
--- a/ui/include/classes/ldap/CLdap.php
+++ b/ui/include/classes/ldap/CLdap.php
@@ -225,7 +225,7 @@ class CLdap {
$filter = '(ObjectClass=*)';
}
$sr = @ldap_search($this->ds, $base, $filter);
- $result = is_resource($sr) ? @ldap_get_entries($this->ds, $sr) : [];
+ $result = $sr !== false ? @ldap_get_entries($this->ds, $sr) : [];
// don't accept more or less than one response
if (!$result || $result['count'] != 1) {
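
The is_resource() check replaced above no longer matches on PHP 8.1, where ldap_search() returns an LDAP\Result object instead of a resource (and false on failure), so comparing against false covers both old and new PHP. A sketch of the same pattern with an invented server:

<?php
// Illustration only: ldap_search() returns a resource on PHP <= 8.0 and an
// LDAP\Result object on PHP >= 8.1; on failure it returns false either way.
$ds = ldap_connect('ldaps://ldap.example.com');	// hypothetical server

$sr = @ldap_search($ds, 'dc=example,dc=com', '(objectClass=*)');
$result = $sr !== false ? @ldap_get_entries($ds, $sr) : [];
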
diff --git a/ui/include/classes/macros/CMacrosResolverGeneral.php b/ui/include/classes/macros/CMacrosResolverGeneral.php
index 9bdaf283122..c47be267524 100644
--- a/ui/include/classes/macros/CMacrosResolverGeneral.php
+++ b/ui/include/classes/macros/CMacrosResolverGeneral.php
@@ -310,14 +310,14 @@ class CMacrosResolverGeneral {
* @param bool $types['functionids'] Extract numeric macros. For example, "{12345}".
* @param bool $types['expr_macros'] Extract expression macros.
* For example, "{?func(/host/key, param)}".
- * @param bool $types['expr_macros_host'] Extract expression macros with with the ability to
+ * @param bool $types['expr_macros_host'] Extract expression macros with the ability to
* specify a {HOST.HOST} macro or an empty host name
* instead of a hostname.
* For example,
* "{?func(/host/key, param)}",
* "{?func(/{HOST.HOST}/key, param)}",
* "{?func(//key, param)}".
- * @param bool $types['expr_macros_host_n'] Extract expression macros with with the ability to
+ * @param bool $types['expr_macros_host_n'] Extract expression macros with the ability to
* specify a {HOST.HOST<1-9>} macro or an empty host
* name instead of a hostname.
* For example,
diff --git a/ui/include/classes/server/CZabbixServer.php b/ui/include/classes/server/CZabbixServer.php
index c045a204c4c..be6a3685c80 100644
--- a/ui/include/classes/server/CZabbixServer.php
+++ b/ui/include/classes/server/CZabbixServer.php
@@ -58,7 +58,7 @@ class CZabbixServer {
const ZBX_TCP_EXPECT_DATA = 2;
/**
- * Max number of bytes to read from the response for each each iteration.
+ * Max number of bytes to read from the response for each iteration.
*/
const READ_BYTES_LIMIT = 8192;
diff --git a/ui/include/classes/validators/CApiInputValidator.php b/ui/include/classes/validators/CApiInputValidator.php
index 17827dcce9b..ab69f47c5a3 100644
--- a/ui/include/classes/validators/CApiInputValidator.php
+++ b/ui/include/classes/validators/CApiInputValidator.php
@@ -2554,14 +2554,14 @@ class CApiInputValidator {
return true;
}
- [$year, $month, $day] = sscanf($data, '%d-%d-%d');
+ $date = DateTime::createFromFormat(ZBX_DATE, $data);
- if (!checkdate($month, $day, $year)) {
+ if (!$date || $date->format(ZBX_DATE) !== $data) {
$error = _s('Invalid parameter "%1$s": %2$s.', $path, _('a date in YYYY-MM-DD format is expected'));
return false;
}
- if (!validateDateInterval($year, $month, $day)) {
+ if (!validateDateInterval($date->format('Y'), $date->format('m'), $date->format('d'))) {
$error = _s('Invalid parameter "%1$s": %2$s.', $path,
_s('value must be between "%1$s" and "%2$s"', '1970-01-01', '2038-01-18')
);
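A minimal sketch of the round-trip check introduced above, assuming ZBX_DATE is the 'Y-m-d' format string used elsewhere in the frontend. DateTime::createFromFormat() silently rolls invalid dates over (2022-02-30 becomes 2022-03-02), so comparing the re-formatted value with the input rejects both malformed strings and non-existent calendar dates:

<?php
define('ZBX_DATE', 'Y-m-d');   // assumption: matches the frontend definition

function is_valid_date(string $data): bool {
    $date = DateTime::createFromFormat(ZBX_DATE, $data);

    return $date !== false && $date->format(ZBX_DATE) === $data;
}

var_dump(is_valid_date('2022-02-28'));   // bool(true)
var_dump(is_valid_date('2022-02-30'));   // bool(false), rolls over to 2022-03-02
var_dump(is_valid_date('2022-2-28'));    // bool(false), fails the round trip (not zero-padded)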
diff --git a/ui/include/classes/widgets/CWidgetHelper.php b/ui/include/classes/widgets/CWidgetHelper.php
index cf5b10569b6..633a899d4d0 100644
--- a/ui/include/classes/widgets/CWidgetHelper.php
+++ b/ui/include/classes/widgets/CWidgetHelper.php
@@ -135,16 +135,16 @@ class CWidgetHelper {
* @return CLabel
*/
public static function getLabel($field, $class = null, $hint = null) {
+ $help_icon = ($hint !== null)
+ ? makeHelpIcon($hint)
+ : null;
+
if ($field instanceof CWidgetFieldSelect) {
- return (new CLabel($field->getLabel(), 'label-'.$field->getName()))
+ return (new CLabel([$field->getLabel(), $help_icon], 'label-'.$field->getName()))
->setAsteriskMark(self::isAriaRequired($field))
->addClass($class);
}
- $help_icon = ($hint !== null)
- ? makeHelpIcon($hint)
- : null;
-
return (new CLabel([$field->getLabel(), $help_icon], $field->getName()))
->setAsteriskMark(self::isAriaRequired($field))
->addClass($class);
@@ -598,7 +598,7 @@ class CWidgetHelper {
];
$table = (new CTable())
->setId('list_'.$field->getName())
- ->setHeader((new CRowHeader($header))->addClass($columns ? null : ZBX_STYLE_DISPLAY_NONE));
+ ->setHeader($header);
$enabled = !($field->getFlags() & CWidgetField::FLAG_DISABLED);
foreach ($columns as $column_index => $column) {
diff --git a/ui/include/classes/widgets/views/widget.item.form.view.php b/ui/include/classes/widgets/views/widget.item.form.view.php
index 7253d34b46a..2581f69dd8b 100644
--- a/ui/include/classes/widgets/views/widget.item.form.view.php
+++ b/ui/include/classes/widgets/views/widget.item.form.view.php
@@ -138,7 +138,9 @@ $form_list
->addClass('form-field')
->addClass('field-fluid'),
- CWidgetHelper::getLabel($fields['units_pos']),
+ CWidgetHelper::getLabel($fields['units_pos'], null,
+ _('Position is ignored for s, uptime and unixtime units.')
+ ),
(new CDiv(CWidgetHelper::getSelect($fields['units_pos'])))->addClass('form-field'),
CWidgetHelper::getLabel($fields['units_size']),
diff --git a/ui/include/config.inc.php b/ui/include/config.inc.php
index 22a01953af4..6b77e16cbc3 100644
--- a/ui/include/config.inc.php
+++ b/ui/include/config.inc.php
@@ -70,7 +70,7 @@ catch (Exception $e) {
CProfiler::getInstance()->start();
-global $ZBX_SERVER, $ZBX_SERVER_PORT, $page;
+global $page;
$page = [
'title' => null,
diff --git a/ui/include/defines.inc.php b/ui/include/defines.inc.php
index c46e4bc7844..ab614c47714 100644
--- a/ui/include/defines.inc.php
+++ b/ui/include/defines.inc.php
@@ -18,8 +18,8 @@
** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
-define('ZABBIX_VERSION', '6.0.5');
-define('ZABBIX_API_VERSION', '6.0.5');
+define('ZABBIX_VERSION', '6.0.6rc1');
+define('ZABBIX_API_VERSION', '6.0.6');
define('ZABBIX_EXPORT_VERSION', '6.0');
define('ZABBIX_DB_VERSION', 6000000);
diff --git a/ui/include/draw.inc.php b/ui/include/draw.inc.php
index 2fb972d71a7..2590dcfe664 100644
--- a/ui/include/draw.inc.php
+++ b/ui/include/draw.inc.php
@@ -27,14 +27,14 @@
* @param array $fgColor foreground color, array of RGB
* @param float $alpha transparency index in range of 0-1, 1 returns unchanged fgColor color
*
- * @return array new color
+ * @return int A color index.
*/
function zbx_colormix($image, $bgColor, $fgColor, $alpha) {
$r = $bgColor[0] + ($fgColor[0] - $bgColor[0]) * $alpha;
$g = $bgColor[1] + ($fgColor[1] - $bgColor[1]) * $alpha;
$b = $bgColor[2] + ($fgColor[2] - $bgColor[2]) * $alpha;
- return imagecolorresolvealpha($image, $r, $g, $b, 0);
+ return imagecolorresolvealpha($image, (int) $r, (int) $g, (int) $b, 0);
}
/**
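A hypothetical worked example of the blend that zbx_colormix() performs and why the added casts matter; the channel values below are made up:

<?php
// White background, black foreground, 25% alpha.
$bgColor = [255, 255, 255];
$fgColor = [0, 0, 0];
$alpha = 0.25;

// Per-channel linear interpolation, as in zbx_colormix().
$r = $bgColor[0] + ($fgColor[0] - $bgColor[0]) * $alpha;   // 191.25, a float

// The patch truncates each channel to int before imagecolorresolvealpha(),
// which expects integer channel values.
var_dump((int) $r);   // int(191)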
diff --git a/ui/include/func.inc.php b/ui/include/func.inc.php
index 6fe4b4677d8..99176b386fe 100644
--- a/ui/include/func.inc.php
+++ b/ui/include/func.inc.php
@@ -1388,7 +1388,7 @@ function make_sorting_header($obj, $tabfield, $sortField, $sortOrder, $link = nu
*
* @param string $number Valid number in decimal or scientific notation.
* @param int|null $precision Max number of significant digits to take into account. Default: ZBX_FLOAT_DIG.
- * @param int|null $decimals Max number of first non-zero decimals decimals to display. Default: 0.
+ * @param int|null $decimals Max number of first non-zero decimals to display. Default: 0.
* @param bool $exact Display exactly this number of decimals instead of first non-zeros.
*
* Note: $decimals must be less than $precision.
@@ -2231,19 +2231,14 @@ function splitPath($path) {
* @param string $color a hexadecimal color identifier like "1F2C33"
* @param int $alpha
*
- * @return int|false
+ * @return int
*/
function get_color($image, $color, $alpha = 0) {
$red = hexdec('0x'.substr($color, 0, 2));
$green = hexdec('0x'.substr($color, 2, 2));
$blue = hexdec('0x'.substr($color, 4, 2));
- if (function_exists('imagecolorexactalpha') && function_exists('imagecreatetruecolor')
- && @imagecreatetruecolor(1, 1)) {
- return imagecolorexactalpha($image, $red, $green, $blue, $alpha);
- }
-
- return imagecolorallocate($image, $red, $green, $blue);
+ return imagecolorexactalpha($image, $red, $green, $blue, $alpha);
}
/**
diff --git a/ui/include/graphs.inc.php b/ui/include/graphs.inc.php
index 987f4981eec..dec27508bb7 100644
--- a/ui/include/graphs.inc.php
+++ b/ui/include/graphs.inc.php
@@ -532,12 +532,15 @@ function get_next_color($palettetype = 0) {
* @param resource $image
* @param int $fontsize
* @param int $angle
- * @param int $x
- * @param int $y
+ * @param int|float $x
+ * @param int|float $y
* @param int $color a numeric color identifier from imagecolorallocate() or imagecolorallocatealpha()
* @param string $string
*/
function imageText($image, $fontsize, $angle, $x, $y, $color, $string) {
+ $x = (int) $x;
+ $y = (int) $y;
+
if ((preg_match(ZBX_PREG_DEF_FONT_STRING, $string) && $angle != 0) || ZBX_FONT_NAME == ZBX_GRAPH_FONT_NAME) {
$ttf = ZBX_FONTPATH.'/'.ZBX_FONT_NAME.'.ttf';
imagettftext($image, $fontsize, $angle, $x, $y, $color, $ttf, $string);
diff --git a/ui/include/images.inc.php b/ui/include/images.inc.php
index 6dcb93c5306..599c92d8e5c 100644
--- a/ui/include/images.inc.php
+++ b/ui/include/images.inc.php
@@ -75,12 +75,10 @@ function imageThumb($source, $thumbWidth = 0, $thumbHeight = 0) {
}
}
- if (function_exists('imagecreatetruecolor') && @imagecreatetruecolor(1, 1)) {
- $thumb = imagecreatetruecolor($thumbWidth, $thumbHeight);
- }
- else {
- $thumb = imagecreate($thumbWidth, $thumbHeight);
- }
+ $thumbWidth = (int) round($thumbWidth);
+ $thumbHeight = (int) round($thumbHeight);
+
+ $thumb = imagecreatetruecolor($thumbWidth, $thumbHeight);
// preserve png transparency
imagealphablending($thumb, false);
@@ -91,11 +89,13 @@ function imageThumb($source, $thumbWidth = 0, $thumbHeight = 0) {
0, 0,
0, 0,
$thumbWidth, $thumbHeight,
- $srcWidth, $srcHeight);
+ $srcWidth, $srcHeight
+ );
imagedestroy($source);
$source = $thumb;
}
+
return $source;
}
diff --git a/ui/include/items.inc.php b/ui/include/items.inc.php
index 30c74c2ddee..1657080a4e4 100644
--- a/ui/include/items.inc.php
+++ b/ui/include/items.inc.php
@@ -1135,17 +1135,21 @@ function getDataOverviewCellData(array $db_items, array $data, int $show_suppres
* @return array
*/
function getDataOverviewItems(?array $groupids, ?array $hostids, ?array $tags, int $evaltype): array {
-
if ($hostids === null) {
- $limit = (int) CSettingsHelper::get(CSettingsHelper::MAX_OVERVIEW_TABLE_SIZE) + 1;
+ $limit = CSettingsHelper::get(CSettingsHelper::SEARCH_LIMIT);
$db_hosts = API::Host()->get([
- 'output' => [],
+ 'output' => ['name'],
'groupids' => $groupids,
'monitored_hosts' => true,
'with_monitored_items' => true,
- 'preservekeys' => true,
- 'limit' => $limit
+ 'sortfield' => ['name'],
+ 'limit' => $limit,
+ 'preservekeys' => true
]);
+
+ CArrayHelper::sort($db_hosts, ['name']);
+ $db_hosts = array_slice($db_hosts, 0, CSettingsHelper::get(CSettingsHelper::MAX_OVERVIEW_TABLE_SIZE) + 1, true);
+
$hostids = array_keys($db_hosts);
}
@@ -1167,7 +1171,7 @@ function getDataOverviewItems(?array $groupids, ?array $hostids, ?array $tags, i
['field' => 'itemid', 'order' => ZBX_SORT_UP]
]);
- return [$db_items, $hostids];
+ return $db_items;
}
/**
@@ -1184,7 +1188,7 @@ function getDataOverview(?array $groupids, ?array $hostids, array $filter): arra
$tags = (array_key_exists('tags', $filter) && $filter['tags']) ? $filter['tags'] : null;
$evaltype = array_key_exists('evaltype', $filter) ? $filter['evaltype'] : TAG_EVAL_TYPE_AND_OR;
- [$db_items, $hostids] = getDataOverviewItems($groupids, $hostids, $tags, $evaltype);
+ $db_items = getDataOverviewItems($groupids, $hostids, $tags, $evaltype);
$data = [];
$item_counter = [];
@@ -1237,35 +1241,51 @@ function getDataOverview(?array $groupids, ?array $hostids, array $filter): arra
]);
$data_display_limit = (int) CSettingsHelper::get(CSettingsHelper::MAX_OVERVIEW_TABLE_SIZE);
- $has_hidden_hosts = (count($db_hosts) > $data_display_limit);
+ $has_hidden_data = count($data) > $data_display_limit || count($db_hosts) > $data_display_limit;
$db_hosts = array_slice($db_hosts, 0, $data_display_limit, true);
+ $host_names = array_column($db_hosts, 'name', 'name');
- $data = array_slice($data, 0, $data_display_limit, true);
- $items_left = $data_display_limit;
$itemids = [];
- array_walk($data, function (array &$item_columns) use ($data_display_limit, &$itemids, &$items_left) {
+ $items_left = $data_display_limit;
+
+ foreach ($data as &$item_columns) {
if ($items_left != 0) {
$item_columns = array_slice($item_columns, 0, min($data_display_limit, $items_left));
$items_left -= count($item_columns);
}
else {
$item_columns = null;
- return;
+ break;
}
- array_walk($item_columns, function (array &$item_column) use ($data_display_limit, &$itemids) {
+ foreach ($item_columns as &$item_column) {
+ CArrayHelper::ksort($item_column);
$item_column = array_slice($item_column, 0, $data_display_limit, true);
- $itemids += array_column($item_column, 'itemid', 'itemid');
- });
- });
+
+ foreach ($item_column as $host_name => $item) {
+ if (array_key_exists($host_name, $host_names)) {
+ $itemids[$item['itemid']] = true;
+ }
+ else {
+ unset($item_column[$host_name]);
+ }
+ }
+ }
+ unset($item_column);
+
+ $item_columns = array_filter($item_columns);
+ }
+ unset($item_columns);
+
$data = array_filter($data);
+ $data = array_slice($data, 0, $data_display_limit, true);
- $has_hidden_items = (count($db_items) != count($itemids));
+ $has_hidden_data = $has_hidden_data || count($db_items) != count($itemids);
$db_items = array_intersect_key($db_items, $itemids);
$data = getDataOverviewCellData($db_items, $data, $filter['show_suppressed']);
- return [$data, $db_hosts, ($has_hidden_items || $has_hidden_hosts)];
+ return [$data, $db_hosts, $has_hidden_data];
}
/**
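A minimal sketch, with hypothetical host names, of the new host selection order in getDataOverviewItems(): hosts are fetched with the general search limit, sorted by name, and only then truncated to MAX_OVERVIEW_TABLE_SIZE + 1, so the overview keeps the alphabetically first hosts rather than an arbitrary subset:

<?php
// Hypothetical stand-in for the API::Host()->get() result, keyed by hostid.
$db_hosts = [
    10 => ['name' => 'web-02'],
    11 => ['name' => 'db-01'],
    12 => ['name' => 'web-01'],
];
$max_overview_table_size = 2;   // stands in for MAX_OVERVIEW_TABLE_SIZE

// Sort by name first (CArrayHelper::sort($db_hosts, ['name']) in the patch) ...
uasort($db_hosts, static function (array $a, array $b): int {
    return strnatcasecmp($a['name'], $b['name']);
});

// ... then keep one row more than the display limit, preserving hostid keys.
$db_hosts = array_slice($db_hosts, 0, $max_overview_table_size + 1, true);

print_r(array_keys($db_hosts));   // [11, 12, 10]: db-01, web-01, web-02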
diff --git a/ui/include/views/configuration.graph.edit.php b/ui/include/views/configuration.graph.edit.php
index 744a56c41af..a241086e79a 100644
--- a/ui/include/views/configuration.graph.edit.php
+++ b/ui/include/views/configuration.graph.edit.php
@@ -178,7 +178,8 @@ if ($this->data['graphtype'] == GRAPH_TYPE_NORMAL || $this->data['graphtype'] ==
GRAPH_YAXIS_TYPE_FIXED => _('Fixed'),
GRAPH_YAXIS_TYPE_ITEM_VALUE => _('Item')
]))
- ->setDisabled($readonly);
+ ->setDisabled($readonly)
+ ->setFocusableElementId('ymin_type_label');
if ($this->data['ymin_type'] == GRAPH_YAXIS_TYPE_FIXED) {
$yaxisMinData[] = (new CDiv())->addClass(ZBX_STYLE_FORM_INPUT_MARGIN);
@@ -235,11 +236,9 @@ if ($this->data['graphtype'] == GRAPH_TYPE_NORMAL || $this->data['graphtype'] ==
$graphForm->addVar('yaxismin', $this->data['yaxismin']);
}
- $yaxismin_label = new CLabel(_('Y axis MIN value'));
+ $yaxismin_label = new CLabel(_('Y axis MIN value'), 'ymin_type_label');
if ($this->data['ymin_type'] == GRAPH_YAXIS_TYPE_ITEM_VALUE) {
- $yaxismin_label
- ->setAsteriskMark()
- ->setAttribute('for', 'ymin_name');
+ $yaxismin_label->setAsteriskMark();
}
$graphFormList->addRow($yaxismin_label, $yaxisMinData);
@@ -253,7 +252,8 @@ if ($this->data['graphtype'] == GRAPH_TYPE_NORMAL || $this->data['graphtype'] ==
GRAPH_YAXIS_TYPE_FIXED => _('Fixed'),
GRAPH_YAXIS_TYPE_ITEM_VALUE => _('Item')
]))
- ->setDisabled($readonly);
+ ->setDisabled($readonly)
+ ->setFocusableElementId('ymax_type_label');
if ($this->data['ymax_type'] == GRAPH_YAXIS_TYPE_FIXED) {
$yaxisMaxData[] = (new CDiv())->addClass(ZBX_STYLE_FORM_INPUT_MARGIN);
@@ -310,11 +310,9 @@ if ($this->data['graphtype'] == GRAPH_TYPE_NORMAL || $this->data['graphtype'] ==
$graphForm->addVar('yaxismax', $this->data['yaxismax']);
}
- $yaxismax_label = new CLabel(_('Y axis MAX value'));
+ $yaxismax_label = new CLabel(_('Y axis MAX value'), 'ymax_type_label');
if ($this->data['ymax_type'] == GRAPH_YAXIS_TYPE_ITEM_VALUE) {
- $yaxismax_label
- ->setAsteriskMark()
- ->setAttribute('for', 'ymax_name');
+ $yaxismax_label->setAsteriskMark();
}
$graphFormList->addRow($yaxismax_label, $yaxisMaxData);
diff --git a/ui/include/views/configuration.graph.list.php b/ui/include/views/configuration.graph.list.php
index 961da213066..f656e7eb2da 100644
--- a/ui/include/views/configuration.graph.list.php
+++ b/ui/include/views/configuration.graph.list.php
@@ -77,7 +77,7 @@ else {
->addFilterTab(_('Filter'), [
(new CFormList())
->addRow(
- (new CLabel(_('Host groups'), 'filter_groups__ms')),
+ (new CLabel(_('Host groups'), 'filter_groupids__ms')),
(new CMultiSelect([
'name' => 'filter_groupids[]',
'object_name' => 'hostGroup',
@@ -96,7 +96,7 @@ else {
]))->setWidth(ZBX_TEXTAREA_MEDIUM_WIDTH)
)
->addRow(
- (new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hosts__ms')),
+ (new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostids__ms')),
(new CMultiSelect([
'name' => 'filter_hostids[]',
'object_name' => ($data['context'] === 'host') ? 'hosts' : 'templates',
diff --git a/ui/include/views/configuration.host.prototype.edit.php b/ui/include/views/configuration.host.prototype.edit.php
index 85982678374..20f241ddcd5 100644
--- a/ui/include/views/configuration.host.prototype.edit.php
+++ b/ui/include/views/configuration.host.prototype.edit.php
@@ -174,7 +174,7 @@ else {
}
$host_tab
- ->addRow(_('Templates'),
+ ->addRow(new CLabel(_('Templates'), 'add_templates__ms'),
(count($templates_field_items) > 1)
? (new CDiv($templates_field_items))->addClass('linked-templates')
: $templates_field_items
@@ -206,7 +206,7 @@ $host_tab->addRow(
// New group prototypes.
$host_tab->addRow(
- _('Group prototypes'),
+ new CLabel(_('Group prototypes'), 'group_prototypes'),
(new CDiv(
(new CTable())
->setId('tbl_group_prototypes')
diff --git a/ui/include/views/configuration.httpconf.list.php b/ui/include/views/configuration.httpconf.list.php
index 7c0031979c9..5a62c994300 100644
--- a/ui/include/views/configuration.httpconf.list.php
+++ b/ui/include/views/configuration.httpconf.list.php
@@ -28,7 +28,7 @@ $hg_ms_params = ($data['context'] === 'host') ? ['real_hosts' => 1] : ['template
$filter_column_left = (new CFormList())
->addRow(
- (new CLabel(_('Host groups'), 'filter_groups__ms')),
+ (new CLabel(_('Host groups'), 'filter_groupids__ms')),
(new CMultiSelect([
'name' => 'filter_groupids[]',
'object_name' => 'hostGroup',
@@ -47,7 +47,7 @@ $filter_column_left = (new CFormList())
]))->setWidth(ZBX_TEXTAREA_MEDIUM_WIDTH)
)
->addRow(
- (new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hosts__ms')),
+ (new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostids__ms')),
(new CMultiSelect([
'name' => 'filter_hostids[]',
'object_name' => ($data['context'] === 'host') ? 'hosts' : 'templates',
diff --git a/ui/include/views/configuration.item.edit.php b/ui/include/views/configuration.item.edit.php
index 6c344b73330..ececb17746b 100644
--- a/ui/include/views/configuration.item.edit.php
+++ b/ui/include/views/configuration.item.edit.php
@@ -645,7 +645,7 @@ if ($data['display_interfaces']) {
}
}
-// Append SNMP common fields fields.
+// Append SNMP common fields.
$item_tab->addItem([
(new CLabel(_('SNMP OID'), 'snmp_oid'))
->setAsteriskMark()
diff --git a/ui/include/views/configuration.template.edit.php b/ui/include/views/configuration.template.edit.php
index 02e065882c4..0178db086f4 100644
--- a/ui/include/views/configuration.template.edit.php
+++ b/ui/include/views/configuration.template.edit.php
@@ -142,7 +142,7 @@ $templates_field_items[] = (new CMultiSelect([
$template_tab
->addRow(
- new CLabel(_('Templates')),
+ new CLabel(_('Templates'), 'add_templates__ms'),
(count($templates_field_items) > 1)
? (new CDiv($templates_field_items))->addClass('linked-templates')
: $templates_field_items
diff --git a/ui/include/views/configuration.triggers.list.php b/ui/include/views/configuration.triggers.list.php
index 1591d3d5016..7415cb4588a 100644
--- a/ui/include/views/configuration.triggers.list.php
+++ b/ui/include/views/configuration.triggers.list.php
@@ -29,7 +29,7 @@ require_once dirname(__FILE__).'/js/configuration.triggers.list.js.php';
$hg_ms_params = ($data['context'] === 'host') ? ['real_hosts' => 1] : ['templated_hosts' => 1];
$filter_column1 = (new CFormList())
- ->addRow((new CLabel(_('Host groups'), 'filter_groupids')),
+ ->addRow((new CLabel(_('Host groups'), 'filter_groupids__ms')),
(new CMultiSelect([
'name' => 'filter_groupids[]',
'object_name' => 'hostGroup',
@@ -46,7 +46,7 @@ $filter_column1 = (new CFormList())
]
]))->setWidth(ZBX_TEXTAREA_FILTER_STANDARD_WIDTH)
)
- ->addRow((new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostids')),
+ ->addRow((new CLabel(($data['context'] === 'host') ? _('Hosts') : _('Templates'), 'filter_hostids__ms')),
(new CMultiSelect([
'name' => 'filter_hostids[]',
'object_name' => ($data['context'] === 'host') ? 'hosts' : 'templates',
diff --git a/ui/include/views/js/configuration.host.prototype.edit.js.php b/ui/include/views/js/configuration.host.prototype.edit.js.php
index 98e40f66b96..2bf10197ec5 100644
--- a/ui/include/views/js/configuration.host.prototype.edit.js.php
+++ b/ui/include/views/js/configuration.host.prototype.edit.js.php
@@ -253,5 +253,11 @@
}
jQuery('input[name=tls_connect]').trigger('change');
+
+ jQuery('#host')
+ .on('input keydown paste', function () {
+ $('#name').attr('placeholder', $(this).val());
+ })
+ .trigger('input');
});
</script>
diff --git a/ui/include/views/js/monitoring.sysmaps.js.php b/ui/include/views/js/monitoring.sysmaps.js.php
index f929a4cc364..00712c93eb2 100644
--- a/ui/include/views/js/monitoring.sysmaps.js.php
+++ b/ui/include/views/js/monitoring.sysmaps.js.php
@@ -127,7 +127,7 @@ function createFontSelect(string $name): CSelect {
->setId('areaSizeHeight')
], 'areaSizeRow')
->addRow(_('Placing algorithm'),
- (new CRadioButtonList(null, SYSMAP_ELEMENT_AREA_VIEWTYPE_GRID))
+ (new CRadioButtonList('viewtype', SYSMAP_ELEMENT_AREA_VIEWTYPE_GRID))
->addValue(_('Grid'), SYSMAP_ELEMENT_AREA_VIEWTYPE_GRID)
->setModern(true),
'areaPlacingRow'
diff --git a/ui/js/class.notifications.js b/ui/js/class.notifications.js
index 92057416fef..94f0ead3ba3 100644
--- a/ui/js/class.notifications.js
+++ b/ui/js/class.notifications.js
@@ -382,7 +382,7 @@ ZBX_Notifications.prototype.handlePushedActiveTabid = function(tabid) {
* When active tab is unloaded, any sibling tab is set to become active. If single session, then we drop LS (privacy).
* We cannot know if this unload will happen because of navigation, scripted reload or a tab was just closed.
* Latter is always assumed, so when navigating active tab, focus is deligated onto to any tab if possible,
- * then this tab might reclaim focus again at construction if during during that time document has focus.
+ * then this tab might reclaim focus again at construction if during that time document has focus.
* At slow connection during page navigation there will be another active tab polling for notifications (if multitab).
* Here `tab` is referred as ZBX_Notifications instance and `focus` - whether instance is `active` (not focused).
*
diff --git a/ui/locale/bg/LC_MESSAGES/frontend.po b/ui/locale/bg/LC_MESSAGES/frontend.po
index 27892d9f71a..d80f2f0a8db 100644
--- a/ui/locale/bg/LC_MESSAGES/frontend.po
+++ b/ui/locale/bg/LC_MESSAGES/frontend.po
@@ -12939,6 +12939,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/ca/LC_MESSAGES/frontend.po b/ui/locale/ca/LC_MESSAGES/frontend.po
index dda32c0f275..65ed854cba7 100644
--- a/ui/locale/ca/LC_MESSAGES/frontend.po
+++ b/ui/locale/ca/LC_MESSAGES/frontend.po
@@ -12914,6 +12914,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/cs/LC_MESSAGES/frontend.po b/ui/locale/cs/LC_MESSAGES/frontend.po
index 39322853f24..37ab0fa2e20 100644
--- a/ui/locale/cs/LC_MESSAGES/frontend.po
+++ b/ui/locale/cs/LC_MESSAGES/frontend.po
@@ -1,19 +1,20 @@
+#
msgid ""
msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2018-09-24 07:57+0300\n"
-"Last-Translator: Lukáš <iam@lukasmaly.net>\n"
+"PO-Revision-Date: 2022-06-09 22:52+0000\n"
+"Last-Translator: Tomáš Heřmánek <tomas.hermanek@initmax.cz>\n"
"Language-Team: Zabbix <info@zabbix.com>\n"
"Language: cs\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
-"X-Generator: Pootle 2.5.1.3\n"
-"X-Poedit-Basepath: ../../\n"
+"X-Generator: Pontoon\n"
"X-POOTLE-MTIME: 1537768671.000000\n"
+"X-Poedit-Basepath: ../../\n"
#: app/controllers/CControllerSlaReportList.php:181
#: include/classes/widgets/forms/CWidgetFormSlaReport.php:104
@@ -404,16 +405,16 @@ msgstr "PŘIDEJ PŘEPIS"
#: app/views/administration.user.list.php:114
msgid "API access"
-msgstr ""
+msgstr "API přístup"
#: app/controllers/CControllerPopupGeneric.php:333
#: app/views/administration.userrole.edit.php:285
msgid "API methods"
-msgstr ""
+msgstr "API metody"
#: app/controllers/CControllerAuditLogList.php:210
msgid "API token"
-msgstr ""
+msgstr "API token"
#: include/classes/api/services/CToken.php:299
#, c-format
@@ -422,14 +423,14 @@ msgstr ""
#: app/controllers/CControllerTokenCreate.php:98
msgid "API token added"
-msgstr ""
+msgstr "Byl přidán API token"
#: app/controllers/CControllerTokenDelete.php:61
msgid "API token deleted"
msgid_plural "API tokens deleted"
-msgstr[0] ""
-msgstr[1] ""
-msgstr[2] ""
+msgstr[0] "API token byl smazán"
+msgstr[1] "API tokeny smazány"
+msgstr[2] "API tokeny smazány"
#: app/controllers/CControllerTokenDisable.php:63
msgid "API token disabled"
@@ -441,17 +442,17 @@ msgstr[2] ""
#: app/controllers/CControllerTokenEnable.php:63
msgid "API token enabled"
msgid_plural "API tokens enabled"
-msgstr[0] ""
-msgstr[1] ""
-msgstr[2] ""
+msgstr[0] "API token je povolený"
+msgstr[1] "API tokeny jsou povoleny"
+msgstr[2] "API tokeny jsou povoleny"
#: include/classes/api/clients/CLocalApiClient.php:198
msgid "API token expired."
-msgstr ""
+msgstr "API token expiroval."
#: app/controllers/CControllerTokenUpdate.php:114
msgid "API token updated"
-msgstr ""
+msgstr "API token byl aktualizován"
#: app/controllers/CControllerTokenEdit.php:107
#: app/controllers/CControllerTokenList.php:193
@@ -469,7 +470,7 @@ msgstr ""
#: include/classes/helpers/CMenuHelper.php:397 include/html.inc.php:820
#: include/html.inc.php:900
msgid "API tokens"
-msgstr ""
+msgstr "API tokeny"
#: include/classes/widgets/forms/CWidgetFormItem.php:251
msgid "Above value"
@@ -490,22 +491,22 @@ msgstr "Přístup odepřen!"
#: app/views/administration.user.edit.php:599
#: app/views/administration.userrole.edit.php:271
msgid "Access to API"
-msgstr ""
+msgstr "Přístup do API"
#: app/views/administration.user.edit.php:452
#: app/views/administration.userrole.edit.php:81
msgid "Access to UI elements"
-msgstr ""
+msgstr "Přístup k UI prvkům"
#: app/views/administration.user.edit.php:628
#: app/views/administration.userrole.edit.php:320
msgid "Access to actions"
-msgstr ""
+msgstr "Přístup k akcím"
#: app/views/administration.user.edit.php:570
#: app/views/administration.userrole.edit.php:222
msgid "Access to modules"
-msgstr ""
+msgstr "Přístup k modulům"
#: app/views/administration.user.edit.php:474
#: app/views/administration.userrole.edit.php:134
@@ -526,7 +527,7 @@ msgstr "Přijmout"
#: include/classes/helpers/CRoleHelper.php:463
msgid "Acknowledge problems"
-msgstr ""
+msgstr "Potvrdit problémy"
#: app/views/popup.condition.common.php:684 include/actions.inc.php:2075
#: include/events.inc.php:196
@@ -621,7 +622,7 @@ msgstr "Akce \"%1$s\" již existuje."
#: include/classes/api/services/CRole.php:761
#, c-format
msgid "Action \"%2$s\" is not available for user role \"%1$s\"."
-msgstr ""
+msgstr "Akce \"%2$s\" není pro uživatelskou roli \"%1$s\" dovolena."
#: actionconf.php:301
msgid "Action added"
@@ -873,15 +874,15 @@ msgstr "Přidat podřízený prvek"
#: app/partials/service.list.edit.php:120
msgid "Add child service"
-msgstr ""
+msgstr "Přidat podřízenou službu"
#: app/views/js/popup.service.edit.js.php:401
msgid "Add child services"
-msgstr ""
+msgstr "Přidat podřízené služby"
#: app/partials/configuration.valuemap.php:47
msgid "Add from"
-msgstr ""
+msgstr "Přidat od"
#: include/actions.inc.php:686 include/actions.inc.php:1029
msgid "Add host"
@@ -889,12 +890,12 @@ msgstr "Přidat hostitele"
#: include/classes/widgets/views/widget.tophosts.form.view.php:75
msgid "Add item column"
-msgstr ""
+msgstr "Přidat sloupec item"
#: app/partials/massupdate.macros.tab.php:95
#: app/partials/massupdate.valuemaps.tab.php:37
msgid "Add missing"
-msgstr ""
+msgstr "Přidat chybějící"
#: jsLoader.php:205
msgid "Add multiple maps"
@@ -907,7 +908,7 @@ msgstr "Přidej nový set dat"
#: app/partials/configuration.host.edit.html.php:256
#: include/views/configuration.host.prototype.edit.php:266
msgid "Add new interface"
-msgstr ""
+msgstr "Přidat nové rozhraní"
#: include/classes/widgets/CWidgetHelper.php:906
msgid "Add new override"
@@ -916,15 +917,15 @@ msgstr "Přidej nový přepis"
#: app/views/js/configuration.dashboard.edit.js.php:202
#: app/views/js/monitoring.dashboard.view.js.php:290
msgid "Add page"
-msgstr ""
+msgstr "Přidat stranu"
#: app/views/js/popup.service.edit.js.php:431
msgid "Add parent services"
-msgstr ""
+msgstr "Přidat nadřazené služby"
#: include/classes/helpers/CRoleHelper.php:461
msgid "Add problem comments"
-msgstr ""
+msgstr "Přidat komentář k problému"
#: app/views/popup.massupdate.trigger.php:101
#: include/views/configuration.graph.edit.php:408
@@ -934,11 +935,11 @@ msgstr "Přidat prototyp"
#: app/controllers/CControllerPopupScheduledReportEdit.php:127
msgid "Add scheduled report"
-msgstr ""
+msgstr "Přidat naplánovaný report"
#: app/views/js/administration.userrole.edit.js.php:210
msgid "Add services"
-msgstr ""
+msgstr "Přidat služby"
#: app/views/monitoring.widget.navtreeitem.edit.php:68
msgid "Add submaps"
@@ -946,7 +947,7 @@ msgstr "Přidat dílčí mapy"
#: app/controllers/CControllerFavouriteDelete.php:69 include/html.inc.php:153
msgid "Add to favorites"
-msgstr ""
+msgstr "Přidat k oblíbeným"
#: include/actions.inc.php:1033
msgid "Add to host group"
@@ -958,11 +959,11 @@ msgstr "Přidat do skupin"
#: app/partials/scheduledreport.subscription.php:42
msgid "Add user"
-msgstr ""
+msgstr "Přidat uživatele"
#: app/partials/scheduledreport.subscription.php:46
msgid "Add user group"
-msgstr ""
+msgstr "Přidat skupinu uživatelů"
#: app/views/js/configuration.dashboard.edit.js.php:198
#: app/views/js/monitoring.dashboard.view.js.php:286
@@ -973,7 +974,7 @@ msgstr "Přidat widget"
#: app/controllers/CControllerAuditLogList.php:356
#: app/controllers/CControllerPopupImportCompare.php:161
msgid "Added"
-msgstr ""
+msgstr "Přidáno"
#: jsLoader.php:340
#, c-format
@@ -983,15 +984,15 @@ msgstr "Přidáno, %1$s"
#: app/views/popup.service.statusrule.edit.php:94
msgid "Additional rule"
-msgstr ""
+msgstr "Další pravidlo"
#: app/views/popup.service.edit.php:182
msgid "Additional rules"
-msgstr ""
+msgstr "Další pravidla"
#: app/partials/administration.ha.nodes.php:28
msgid "Address"
-msgstr ""
+msgstr "Adresa"
#: include/hosts.inc.php:63 include/users.inc.php:53
msgid "Admin"
@@ -1005,7 +1006,7 @@ msgstr "Administrace"
#: app/views/popup.service.edit.php:158
#: include/classes/widgets/forms/CWidgetFormItem.php:69
msgid "Advanced configuration"
-msgstr ""
+msgstr "Pokročilá konfigurace"
#: include/views/monitoring.sysmap.edit.php:158
msgid "Advanced labels"
@@ -1055,21 +1056,21 @@ msgstr ""
#: include/classes/widgets/CWidgetHelper.php:1317
msgid "Aggregate"
-msgstr ""
+msgstr "Agregace"
#: app/views/popup.triggerexpr.php:109
msgid "Aggregate functions"
-msgstr ""
+msgstr "Agregační funkce"
#: app/views/popup.tophosts.column.edit.php:120
#: include/classes/widgets/CWidgetHelper.php:1289
msgid "Aggregation function"
-msgstr ""
+msgstr "Agregační funkce"
#: app/views/popup.tophosts.column.edit.php:139
#: include/classes/widgets/CWidgetHelper.php:1308
msgid "Aggregation interval"
-msgstr ""
+msgstr "Agregační interval"
#: include/actions.inc.php:2106
msgid "Alert message"
@@ -1077,7 +1078,7 @@ msgstr "Poplašná zpráva"
#: app/controllers/CControllerPopupTriggerExpr.php:1116
msgid "Algorithm"
-msgstr ""
+msgstr "Algoritmus"
#: include/hosts.inc.php:114
msgid "Alias"
@@ -1112,7 +1113,7 @@ msgstr "Všechny panely"
#: app/views/monitoring.charts.view.php:82
msgid "All graphs"
-msgstr ""
+msgstr "Všechny grafy"
#: app/partials/administration.usergroup.grouprights.html.php:46
#: app/views/administration.user.edit.php:425
@@ -1144,7 +1145,7 @@ msgstr "Všechny problémy, pokud se jejich hodnoty rovnají tagu"
#: app/controllers/CControllerServiceListGeneral.php:150
msgid "All services"
-msgstr ""
+msgstr "Všechny služby"
#: app/partials/administration.usergroup.tagfilters.html.php:47
msgid "All tags"
@@ -1156,7 +1157,7 @@ msgstr "Všechny šablony"
#: app/views/administration.userrole.edit.php:289
msgid "Allow list"
-msgstr ""
+msgstr "Seznam povolených"
#: app/views/popup.massupdate.trigger.php:60 include/triggers.inc.php:991
#: include/views/configuration.trigger.prototype.edit.php:526
@@ -1173,7 +1174,7 @@ msgstr "Povolení hosté"
#: app/views/administration.user.edit.php:619
msgid "Allowed methods"
-msgstr ""
+msgstr "Povolené metody"
#: include/classes/setup/CSetupWizard.php:882
msgid "Alternatively, you can install it manually:"
@@ -1316,7 +1317,7 @@ msgstr "Aritmetický"
#: app/views/popup.tophosts.column.edit.php:150
#: include/classes/helpers/CServiceHelper.php:148
msgid "As is"
-msgstr ""
+msgstr "Ponechat stejné"
#: include/views/monitoring.history.php:102
msgid "As plain text"
@@ -1453,7 +1454,7 @@ msgstr "Audit log"
#: app/views/administration.housekeeping.edit.php:179
msgid "Audit settings"
-msgstr ""
+msgstr "Nastavení auditu"
#: include/func.inc.php:243
msgid "Aug"
@@ -1577,7 +1578,7 @@ msgstr "Automatický výběr ikony"
#: include/classes/helpers/CMediatypeHelper.php:189
#: include/classes/helpers/CMenuHelper.php:249 include/html.inc.php:890
msgid "Autoregistration"
-msgstr ""
+msgstr "Automatická registrace"
#: include/classes/helpers/CMenuHelper.php:209
#: include/views/configuration.action.list.php:35
@@ -1718,7 +1719,7 @@ msgstr "Tělo a záhlaví"
#: include/classes/widgets/forms/CWidgetFormItem.php:275
#: include/classes/widgets/forms/CWidgetFormItem.php:335
msgid "Bold"
-msgstr ""
+msgstr "Tučný"
#: include/graphs.inc.php:59 include/views/js/monitoring.sysmaps.js.php:780
#: include/views/js/monitoring.sysmaps.js.php:870
@@ -1747,7 +1748,7 @@ msgstr "Šířka okraje"
#: app/controllers/CControllerUserUpdateGeneral.php:106
msgid "Both passwords must be equal."
-msgstr ""
+msgstr "Obě hesla musí být stejná."
#: include/classes/widgets/forms/CWidgetFormItem.php:108
#: include/classes/widgets/forms/CWidgetFormItem.php:189
@@ -1892,7 +1893,7 @@ msgstr "Zrušit"
#: app/controllers/CControllerTokenCreate.php:44
#: app/controllers/CControllerTokenCreate.php:105
msgid "Cannot add API token"
-msgstr ""
+msgstr "Nelze přidat token API"
#: include/classes/api/services/CGraph.php:517
#: include/classes/api/services/CGraph.php:531
@@ -2062,7 +2063,7 @@ msgstr ""
#: jsLoader.php:343
msgctxt "screen reader"
msgid "Cannot be removed"
-msgstr ""
+msgstr "Nelze odstranit"
#: include/classes/validators/CLdapAuthValidator.php:79
msgid "Cannot bind anonymously to LDAP server."
@@ -2116,7 +2117,7 @@ msgstr "Není možné se připojit k databázi."
#: include/views/js/configuration.httpconf.edit.js.php:182
msgid "Cannot convert POST data:"
-msgstr ""
+msgstr "Nelze převést POST data:"
#: graphs.php:390
msgid "Cannot copy graph"
@@ -2202,9 +2203,9 @@ msgstr ""
#: app/controllers/CControllerTokenDelete.php:64
msgid "Cannot delete API token"
msgid_plural "Cannot delete API tokens"
-msgstr[0] ""
-msgstr[1] ""
-msgstr[2] ""
+msgstr[0] "Nelze smazat API token"
+msgstr[1] "Nelze smazat API tokeny"
+msgstr[2] "Nelze smazat API tokeny"
#: app/controllers/CControllerSlaDelete.php:79
msgid "Cannot delete SLA"
@@ -2228,12 +2229,11 @@ msgid "Cannot delete assigned user role \"%1$s\"."
msgstr ""
#: app/controllers/CControllerCorrelationDelete.php:68
-#, fuzzy
msgid "Cannot delete correlation"
msgid_plural "Cannot delete correlations"
msgstr[0] "Nelze odstranit korelaci"
-msgstr[1] "Nelze odstranit korelaci"
-msgstr[2] "Nelze odstranit korelaci"
+msgstr[1] "Nelze odstranit korelace"
+msgstr[2] "Nelze odstranit korelace"
#: app/controllers/CControllerDashboardDelete.php:63
#: app/controllers/CControllerTemplateDashboardDelete.php:61
@@ -2249,12 +2249,11 @@ msgid "Cannot delete dependency"
msgstr "Není možné smazat závislost"
#: app/controllers/CControllerDiscoveryDelete.php:69 host_discovery.php:416
-#, fuzzy
msgid "Cannot delete discovery rule"
msgid_plural "Cannot delete discovery rules"
msgstr[0] "Nelze smazat pravidlo nálezu"
-msgstr[1] "Nelze smazat pravidlo nálezu"
-msgstr[2] "Nelze smazat pravidlo nálezu"
+msgstr[1] "Nelze smazat pravidla nálezu"
+msgstr[2] "Nelze smazat pravidla nálezu"
#: host_discovery.php:780
msgid "Cannot delete discovery rules"
@@ -2406,7 +2405,6 @@ msgid "Cannot delete selected actions"
msgstr "Není možné smazat označené akce"
#: app/controllers/CControllerServiceDelete.php:78
-#, fuzzy
msgid "Cannot delete service"
msgid_plural "Cannot delete services"
msgstr[0] "Nelze smazat službu"
@@ -2598,9 +2596,9 @@ msgstr "Není možné zobrazit víc jak jednu položku s typem \"Souhrn grafu\".
#: app/controllers/CControllerTokenEnable.php:66
msgid "Cannot enable API token"
msgid_plural "Cannot enable API tokens"
-msgstr[0] ""
-msgstr[1] ""
-msgstr[2] ""
+msgstr[0] "Nelze povolit API token"
+msgstr[1] "Nelze povolit API tokeny"
+msgstr[2] "Nelze povolit API tokeny"
#: app/controllers/CControllerSlaEnable.php:88
msgid "Cannot enable SLA"
@@ -4321,12 +4319,11 @@ msgid "Correlation added"
msgstr "Korelace přidána"
#: app/controllers/CControllerCorrelationDelete.php:65
-#, fuzzy
msgid "Correlation deleted"
msgid_plural "Correlations deleted"
msgstr[0] "Korelace smazána"
-msgstr[1] "Korelace smazána"
-msgstr[2] "Korelace smazána"
+msgstr[1] "Korelace smazány"
+msgstr[2] "Korelace smazány"
#: app/controllers/CControllerCorrelationDisable.php:72
msgid "Correlation disabled"
@@ -4384,7 +4381,7 @@ msgstr ""
#: app/views/administration.token.list.php:105
#: app/views/administration.user.token.list.php:71
msgid "Create API token"
-msgstr ""
+msgstr "Vytvořit API token"
#: app/views/sla.list.php:188
msgid "Create SLA"
@@ -5274,12 +5271,12 @@ msgstr "Smazat skript?"
#: app/views/administration.token.edit.php:115
#: app/views/administration.user.token.edit.php:97
msgid "Delete selected API token?"
-msgstr ""
+msgstr "Smazat vybraný API token?"
#: app/views/administration.token.list.php:201
#: app/views/administration.user.token.list.php:153
msgid "Delete selected API tokens?"
-msgstr ""
+msgstr "Smazat vybrané API tokeny?"
#: app/views/js/sla.list.js.php:130 app/views/popup.sla.edit.php:274
msgid "Delete selected SLA?"
@@ -5641,7 +5638,7 @@ msgstr "Zakázat hostitele sledované přes vybrané proxy?"
#: app/views/administration.token.list.php:200
#: app/views/administration.user.token.list.php:152
msgid "Disable selected API tokens?"
-msgstr ""
+msgstr "Zakázat označené API tokeny?"
#: app/views/js/sla.list.js.php:118
msgid "Disable selected SLA?"
@@ -5903,12 +5900,11 @@ msgid "Discovery rule created"
msgstr " Pravidlo nálezu vytvořeno"
#: app/controllers/CControllerDiscoveryDelete.php:66 host_discovery.php:416
-#, fuzzy
msgid "Discovery rule deleted"
msgid_plural "Discovery rules deleted"
-msgstr[0] " Pravidlo nálezu smazáno"
-msgstr[1] " Pravidlo nálezu smazáno"
-msgstr[2] " Pravidlo nálezu smazáno"
+msgstr[0] "Pravidlo nálezu smazáno"
+msgstr[1] "Pravidla nálezu smazána"
+msgstr[2] "Pravidla nálezu smazána"
#: app/controllers/CControllerDiscoveryDisable.php:71 host_discovery.php:767
msgid "Discovery rule disabled"
@@ -11261,7 +11257,7 @@ msgstr ""
#: include/classes/helpers/CRoleHelper.php:466
msgid "Manage API tokens"
-msgstr ""
+msgstr "Správa API tokenů "
#: include/classes/helpers/CRoleHelper.php:472
msgid "Manage SLA"
@@ -13032,6 +13028,10 @@ msgstr "Počet položek v Zabbix databázi."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Počet položek v historii proxy, které ještě nebyli odeslané serveru"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Počet položek ve frontě, které jsou zpožděné od do sekund včetně."
@@ -16195,12 +16195,11 @@ msgid "Service data storage period"
msgstr ""
#: app/controllers/CControllerServiceDelete.php:63
-#, fuzzy
msgid "Service deleted"
msgid_plural "Services deleted"
msgstr[0] "Služba byla smazána"
-msgstr[1] "Služba byla smazána"
-msgstr[2] "Služba byla smazána"
+msgstr[1] "Služby byla smazány"
+msgstr[2] "Služby byla smazány"
#: app/views/administration.userrole.edit.php:144
#: app/views/administration.userrole.edit.php:185
diff --git a/ui/locale/de/LC_MESSAGES/frontend.po b/ui/locale/de/LC_MESSAGES/frontend.po
index 21e66bc1033..249a3a62b48 100644
--- a/ui/locale/de/LC_MESSAGES/frontend.po
+++ b/ui/locale/de/LC_MESSAGES/frontend.po
@@ -4,8 +4,8 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-04-12 18:25+0000\n"
-"Last-Translator: Joachim <joachim.schwender@web.de>\n"
+"PO-Revision-Date: 2022-06-03 17:57+0000\n"
+"Last-Translator: Lukas <lmayer@wind.gmbh>\n"
"Language-Team: Zabbix <info@zabbix.com>\n"
"Language: de\n"
"MIME-Version: 1.0\n"
@@ -20,7 +20,7 @@ msgstr ""
#: include/classes/widgets/forms/CWidgetFormSlaReport.php:104
#, c-format
msgid "\"%1$s\" date must be less than \"%2$s\" date."
-msgstr ""
+msgstr "Das Datum „%1$s“ muss vor dem Datum „%2$s“ liegen."
#: include/classes/db/MysqlDbBackend.php:70
#: include/classes/db/OracleDbBackend.php:142
@@ -1030,7 +1030,7 @@ msgstr "Agent-Schnittstellen"
#: include/classes/data/CItemData.php:829
msgid "Agent variant check. Returns 1 - for Zabbix agent; 2 - for Zabbix agent 2"
-msgstr ""
+msgstr "Prüfung der Agent-Variante. Rückgabewert 1 für Zabbix Agent; 2 für Zabbix Agent 2"
#: include/classes/widgets/CWidgetHelper.php:1317
msgid "Aggregate"
@@ -1361,7 +1361,7 @@ msgstr "At least one UI element must be enabled for the \"%1$s\" user role."
#: include/classes/api/services/CMaintenance.php:683
msgid "At least one day of the week or day of the month must be specified."
-msgstr ""
+msgstr "Mindestens ein Wochentag oder Tag im Monat muss angegeben werden. "
#: include/classes/api/services/CMaintenance.php:382
#: include/classes/api/services/CMaintenance.php:574
@@ -12949,6 +12949,10 @@ msgstr "Anzahl der Datenpunkte in Zabbix Datenbank."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Anzahl der Datenpunkte in Proxy-Ablaufverfolgung, die noch nicht an den Server gesendet wurden"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Anzahl an Datenpunkten in der Warteschlange, die von bis Sekunden, inklusive verzögert wurden."
diff --git a/ui/locale/el/LC_MESSAGES/frontend.po b/ui/locale/el/LC_MESSAGES/frontend.po
index 553d1630ba0..8ee0fe0949b 100644
--- a/ui/locale/el/LC_MESSAGES/frontend.po
+++ b/ui/locale/el/LC_MESSAGES/frontend.po
@@ -12929,6 +12929,10 @@ msgstr "Αριθμός συλλεκτών βάσεως δεδομένων Zabbix
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Αριθμός συλλεκτών στο ιστορικό διαμεσολαβητή οι οποίοι ακόμη δεν απεστάλησαν στον εξυπηρετητή"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Αριθμός συλλεκτών στην ουρά οι οποίοι καθυστερούν από έως σε δευτερόλεπτα, χωρίς αποκλεισμούς."
diff --git a/ui/locale/en/LC_MESSAGES/frontend.pot b/ui/locale/en/LC_MESSAGES/frontend.pot
index cd5d7296a2a..47d6e801efa 100644
--- a/ui/locale/en/LC_MESSAGES/frontend.pot
+++ b/ui/locale/en/LC_MESSAGES/frontend.pot
@@ -12917,6 +12917,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/en_GB/LC_MESSAGES/frontend.po b/ui/locale/en_GB/LC_MESSAGES/frontend.po
index 8a06163dce3..35a1bef6cf9 100644
--- a/ui/locale/en_GB/LC_MESSAGES/frontend.po
+++ b/ui/locale/en_GB/LC_MESSAGES/frontend.po
@@ -12916,6 +12916,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/es/LC_MESSAGES/frontend.po b/ui/locale/es/LC_MESSAGES/frontend.po
index ed358acf4fe..121bff6133c 100644
--- a/ui/locale/es/LC_MESSAGES/frontend.po
+++ b/ui/locale/es/LC_MESSAGES/frontend.po
@@ -4,8 +4,8 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-05-24 14:36+0000\n"
-"Last-Translator: Jurijs <jurijs.klopovskis@zabbix.com>\n"
+"PO-Revision-Date: 2022-06-07 17:11+0000\n"
+"Last-Translator: Edinson <edinson.vivas@gmail.com>\n"
"Language-Team: Zabbix <info@zabbix.com>\n"
"Language: es\n"
"MIME-Version: 1.0\n"
@@ -2115,7 +2115,7 @@ msgstr[1] "No se pudieron copiar los iniciadores"
#: include/triggers.inc.php:246
#, c-format
msgid "Cannot copy trigger \"%1$s\", because it has multiple hosts in the expression."
-msgstr ""
+msgstr "No se puede copiar el activador \"%1$s\", porque tiene varios hosts en la expresión."
#: include/triggers.inc.php:226
#, c-format
@@ -2317,7 +2317,7 @@ msgstr[1] "No es posible eliminar el tipo de medio"
#, c-format
msgid "Cannot delete module: %1$s."
msgid_plural "Cannot delete modules: %1$s."
-msgstr[0] ""
+msgstr[0] "No es posible eliminar el módulo: %1$s."
msgstr[1] "No puede eliminar el módulo: %1$s."
#: sysmaps.php:261
@@ -2588,14 +2588,14 @@ msgstr[1] "No se pudieron habilitar los tipos de soporte"
#, c-format
msgid "Cannot enable module: %1$s."
msgid_plural "Cannot enable modules: %1$s."
-msgstr[0] ""
-msgstr[1] ""
+msgstr[0] "No se puede habilitar el módulo: %1$s."
+msgstr[1] "No se puede habilitar los módulos: %1$s."
#: app/controllers/CControllerScheduledReportEnable.php:78
msgid "Cannot enable scheduled report"
msgid_plural "Cannot enable scheduled reports"
-msgstr[0] ""
-msgstr[1] ""
+msgstr[0] "No se puede habilitar el reporte programado"
+msgstr[1] "No se puede habilitar los reportes programados"
#: triggers.php:489
msgid "Cannot enable trigger"
@@ -2611,11 +2611,11 @@ msgstr[1] "No se pudo habilitar los escenarios web"
#: app/controllers/CControllerPopupTestTriggerExpr.php:199
msgid "Cannot evaluate expression"
-msgstr ""
+msgstr "No se puede evaluar la expresión"
#: app/controllers/CControllerPopupScriptExec.php:123
msgid "Cannot execute script."
-msgstr ""
+msgstr "No se puede ejecutar el script."
#: jsLoader.php:287
msgid "Cannot expand macros."
@@ -2629,7 +2629,7 @@ msgstr "No se pudo encontrar la imagen de fondo \"%1$s\" usada en el mapa \"%2$s
#: include/classes/import/importers/CTemplateDashboardImporter.php:170
#, c-format
msgid "Cannot find graph \"%1$s\" used in dashboard \"%2$s\"."
-msgstr ""
+msgstr "No se puede encontrar el gráfico \"%1$s\" utilizado en el tablero \"%2$s\"."
#: include/classes/import/importers/CMapImporter.php:152
#, c-format
@@ -2639,7 +2639,7 @@ msgstr "No se pudo encontrar el grupo \"%1$s\" usado en el mapa \"%2$s\"."
#: include/classes/import/importers/CTemplateDashboardImporter.php:140
#, c-format
msgid "Cannot find host \"%1$s\" used in dashboard \"%2$s\"."
-msgstr ""
+msgstr "No se puede encontrar el host \"%1$s\" utilizado en el tablero \"%2$s\"."
#: include/classes/import/importers/CMapImporter.php:164
#, c-format
@@ -2672,17 +2672,17 @@ msgstr "No se pudo encontrar la asignación de icono \"%1$s\" usado en el mapa \
#: include/classes/import/CConfigurationImport.php:952
#, c-format
msgid "Cannot find interface \"%1$s\" used for discovery rule \"%2$s\" on \"%3$s\"."
-msgstr ""
+msgstr "No se puede encontrar la interfaz \"%1$s\" utilizada para la regla de descubrimiento \"%2$s\" en \"%3$s\"."
#: include/classes/import/CConfigurationImport.php:719
#, c-format
msgid "Cannot find interface \"%1$s\" used for item \"%2$s\" on \"%3$s\"."
-msgstr ""
+msgstr "No es posible encontrar la interfaz \"%1$s\" usada por el elemento \"%2$s\" en \"%3$s\"."
#: include/classes/import/CConfigurationImport.php:1105
#, c-format
msgid "Cannot find interface \"%1$s\" used for item prototype \"%2$s\" of discovery rule \"%3$s\" on \"%4$s\"."
-msgstr ""
+msgstr "No es posible encontrar la interfaz \"%1$s\" usada por el prototipo de item \"%2$s\" en la regla de descubrimiento \"%3$s\" en \"%4$s\"."
#: include/classes/import/CConfigurationImport.php:1601
#, c-format
@@ -2717,7 +2717,7 @@ msgstr "No pude encontrar el elemento \"%1$s\" en \"%2$s\" usado en el prototipo
#: include/classes/import/importers/CTemplateDashboardImporter.php:155
#, c-format
msgid "Cannot find item \"%1$s\" used in dashboard \"%2$s\"."
-msgstr ""
+msgstr "No se puede encontrar el elemento\"%1$s\" utilizado en el tablero \"%2$s\"."
#: include/classes/import/importers/CMapImporter.php:140
#, c-format
@@ -2732,7 +2732,7 @@ msgstr "No se pudo encontrar la plantilla \"%1$s\" de un prototipo de equipo \"%
#: include/classes/import/CConfigurationImport.php:1002
#, c-format
msgid "Cannot find template \"%1$s\" for override \"%2$s\" of discovery rule \"%3$s\" on \"%4$s\"."
-msgstr ""
+msgstr "No se puede encontrar la plantilla \"%1$s\" para anular \"%2$s\" de la regla de descubrimiento \"%3$s\" en \"%4$s\"."
#: include/classes/import/importers/CMapImporter.php:179
#: include/classes/import/importers/CMapImporter.php:229
@@ -2756,13 +2756,13 @@ msgstr "No se pudo encontrar el mapa de valores \"%1$s\" usado en el prototipo d
#: include/classes/api/services/CRole.php:683
#, c-format
msgid "Cannot have non-default \"%2$s\" rule while having \"%3$s\" set to %4$d for user role \"%1$s\"."
-msgstr ""
+msgstr "No se puede tener una regla \"%2$s\" no predeterminada mientras se tiene \"%3$s\" establecido en %4$d para el rol de usuario \"%1$s\"."
#: include/classes/api/services/CRole.php:569
#: include/classes/api/services/CRole.php:645
#, c-format
msgid "Cannot have non-empty tag value while having empty tag in rule \"%2$s\" for user role \"%1$s\"."
-msgstr ""
+msgstr "No se puede tener un valor de etiqueta no vacío mientras se tiene una etiqueta vacía en la regla \"%2$s\" para el rol de usuario \"%1$s\"."
#: include/classes/import/importers/CTemplateImporter.php:258
#: include/classes/import/importers/CTemplateImporter.php:410
@@ -2783,54 +2783,54 @@ msgstr "No se puede linkar plantilla"
#: include/classes/api/services/CHostBase.php:520
#, c-format
msgid "Cannot link template \"%1$s\" to host \"%2$s\" because its parent template \"%3$s\" will be linked twice."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al host \"%2$s\" porque su plantilla padre \"%3$s\" se vinculará dos veces."
#: include/classes/api/services/CHostBase.php:678
#: include/classes/api/services/CHostBase.php:755
#, c-format
msgid "Cannot link template \"%1$s\" to host \"%2$s\" due to dependency of trigger \"%3$s\"."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al host \"%2$s\" debido a la dependencia del disparador \"%3$s\"."
#: include/classes/api/services/CHostBase.php:805
#, c-format
msgid "Cannot link template \"%1$s\" to host \"%2$s\" due to expression of trigger \"%3$s\"."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al host \"%2$s\" debido a la expresión del disparador \"%3$s\"."
#: include/classes/api/services/CHostBase.php:644
#, c-format
msgid "Cannot link template \"%1$s\" to host \"%2$s\", because its parent template \"%3$s\" would be linked twice."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al equipo \"%2$s\", porque su plantilla padre \"%3$s\" se vincularía dos veces."
#: include/classes/api/services/CHostBase.php:517
#, c-format
msgid "Cannot link template \"%1$s\" to host prototype \"%2$s\" because its parent template \"%3$s\" will be linked twice."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al prototipo de equipo \"%2$s\" porque su plantilla padre \"%3$s\" se vinculará dos veces."
#: include/classes/api/services/CHostBase.php:675
#: include/classes/api/services/CHostBase.php:752
#, c-format
msgid "Cannot link template \"%1$s\" to host prototype \"%2$s\" due to dependency of trigger \"%3$s\"."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al prototipo de equipo \"%2$s\" debido a la dependencia del disparador \"%3$s\"."
#: include/classes/api/services/CHostBase.php:802
#, c-format
msgid "Cannot link template \"%1$s\" to host prototype \"%2$s\" due to expression of trigger \"%3$s\"."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al prototipo del equipo \"%2$s\" debido a la expresión del disparador \"%3$s\"."
#: include/classes/api/services/CHostBase.php:641
#, c-format
msgid "Cannot link template \"%1$s\" to host prototype \"%2$s\", because its parent template \"%3$s\" would be linked twice."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" al prototipo de equipo \"%2$s\", porque su plantilla padre \"%3$s\" se vincularía dos veces."
#: include/classes/api/services/CHostBase.php:381
#, c-format
msgid "Cannot link template \"%1$s\" to template \"%2$s\" because circular linkage (%3$s) will occurs."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" a la plantilla \"%2$s\" porque se ocurrirá una vinculación circular (%3$s)."
#: include/classes/api/services/CHostBase.php:514
#, c-format
msgid "Cannot link template \"%1$s\" to template \"%2$s\" because its parent template \"%3$s\" will be linked twice."
-msgstr ""
+msgstr "No se puede vincular la plantilla \"%1$s\" a la plantilla \"%2$s\" porque su plantilla padre \"%3$s\" se vinculará dos veces."
#: include/classes/api/services/CHostBase.php:672
#: include/classes/api/services/CHostBase.php:749
@@ -2938,7 +2938,7 @@ msgstr "No se pudo leer el XML: %1$s."
#: include/classes/import/readers/CYamlImportReader.php:55
#, c-format
msgid "Cannot read YAML: %1$s."
-msgstr ""
+msgstr "No se puede leer YAML: %1$s."
#: include/classes/server/CZabbixServer.php:543
#, c-format
@@ -3382,13 +3382,13 @@ msgstr "No es posible actualizar al usuario"
#: app/controllers/CControllerUsergroupUpdate.php:105
msgid "Cannot update user group"
msgid_plural "Cannot update user groups"
-msgstr[0] ""
-msgstr[1] ""
+msgstr[0] "No se puede actualizar el grupo de usuarios"
+msgstr[1] "No se puede actualizar el grupo de usuarios"
#: app/controllers/CControllerUserroleUpdate.php:106
#: app/controllers/CControllerUserroleUpdate.php:176
msgid "Cannot update user role"
-msgstr ""
+msgstr "No se puede actualizar el rol de usuario"
#: httpconf.php:194
msgid "Cannot update web scenario"
@@ -12915,6 +12915,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/fa/LC_MESSAGES/frontend.po b/ui/locale/fa/LC_MESSAGES/frontend.po
index 80b6052ffe0..70750dcea46 100644
--- a/ui/locale/fa/LC_MESSAGES/frontend.po
+++ b/ui/locale/fa/LC_MESSAGES/frontend.po
@@ -12807,6 +12807,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/fi/LC_MESSAGES/frontend.po b/ui/locale/fi/LC_MESSAGES/frontend.po
index ceb3dd1ffe4..6e5f88da3ac 100644
--- a/ui/locale/fi/LC_MESSAGES/frontend.po
+++ b/ui/locale/fi/LC_MESSAGES/frontend.po
@@ -12914,6 +12914,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/fr/LC_MESSAGES/frontend.po b/ui/locale/fr/LC_MESSAGES/frontend.po
index 3c1fba149f8..7571ef2acb2 100644
--- a/ui/locale/fr/LC_MESSAGES/frontend.po
+++ b/ui/locale/fr/LC_MESSAGES/frontend.po
@@ -4,8 +4,8 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-04-04 11:01+0000\n"
-"Last-Translator: Florent <fpasero@gab6.fr>\n"
+"PO-Revision-Date: 2022-05-14 13:53+0000\n"
+"Last-Translator: ERIC <eric78.zabbix@orange.fr>\n"
"Language-Team: Zabbix <info@zabbix.com>\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
@@ -9582,7 +9582,7 @@ msgstr ""
#: app/partials/configuration.tags.tab.php:140
msgid "Inherited and item tags"
-msgstr ""
+msgstr "Tags hérités et d'élément"
#: app/partials/configuration.tags.tab.php:135
msgid "Inherited and scenario tags"
@@ -12926,6 +12926,10 @@ msgstr "Nombre d'éléments dans la base de données Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Nombre d'éléments dans l'historique du proxy qui ne sont pas encore envoyés au serveur"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Nombre d'éléments dans la queue qui rencontrent un délai de from à to secondes, incluses."
diff --git a/ui/locale/he/LC_MESSAGES/frontend.po b/ui/locale/he/LC_MESSAGES/frontend.po
index 767f6771b98..2b9676168d9 100644
--- a/ui/locale/he/LC_MESSAGES/frontend.po
+++ b/ui/locale/he/LC_MESSAGES/frontend.po
@@ -4,7 +4,7 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-05-27 06:24+0000\n"
+"PO-Revision-Date: 2022-06-07 11:58+0000\n"
"Last-Translator: Yaron <sh.yaron@gmail.com>\n"
"Language-Team: none\n"
"Language: he\n"
@@ -12925,6 +12925,10 @@ msgstr "מספר הפריטים במסד הנתונים של Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "מספר הפריטים בהיסטוריה של המתווך שלא נשלחו עדיין לשרת"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr "מספר הפריטים בתור שהושהו למשך תקופת שניות מ־ עד, כולל."
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "מספר הפריטים בתור שהושהו למשך תקופת שניות מ־ עד, כולל."
@@ -17944,57 +17948,57 @@ msgstr "ההקפצה „%1$s” שייכת לתבניות עם קישוריות
#: include/triggers.inc.php:372 include/triggers.inc.php:433
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the host \"%3$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להסתמך על ההקפצה „%2$s” שאינה קיימת, במארח „%3$s”."
#: include/triggers.inc.php:373 include/triggers.inc.php:434
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the template \"%3$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להסתמך על ההקפצה „%2$s”, שאינה קיימת, בתבנית „%3$s”."
#: include/classes/api/services/CTriggerGeneral.php:2728
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\" from the host \"%3$s\", because dependencies on triggers from a child template or host are not allowed."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” מהמארח „%3$s”, כיוון שתלויות בהקפצות מתבנית צאצאית או מארח אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2727
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\" from the template \"%3$s\", because dependencies on triggers from a child template or host are not allowed."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” מהתבנית „%3$s”, כיוון שתלויות בהקפצות מתבנית צאצאית או מארח אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2621
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\" from the template \"%3$s\", because dependencies on triggers from the parent template are not allowed."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” מהתבנית „%3$s”, כיוון שתלויות בהקפצות מתבנית הורה אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2354
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because a circular linkage (%3$s) would occur for host \"%4$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” כיוון שתיווצר קישוריות מעגלית (%3$s) למארח „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2349
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because a circular linkage (%3$s) would occur for template \"%4$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” כיוון שתיווצר קישוריות מעגלית (%3$s) לתבנית „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2365
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because a circular linkage (%3$s) would occur."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” כיוון שתיווצר קישוריות מעגלית (%3$s)."
#: include/classes/api/services/CTriggerGeneral.php:2472
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because dependencies of host triggers on template triggers are not allowed."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להיות תלויה בהקפצה „%2$s” כיוון שתלויות של הקפצות מארח בתבניות הקפצה אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2857
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because the template \"%3$s\" is not linked to the host \"%4$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להסתמך על ההקפצה „%2$s”, כיוון שהתבנית „%3$s” לא מקושרת למארח „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2856
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the trigger \"%2$s\", because the template \"%3$s\" is not linked to the template \"%4$s\"."
-msgstr ""
+msgstr "ההקפצה „%1$s” לא יכולה להסתמך על ההקפצה „%2$s”, כיוון שהתבנית „%3$s” לא מקושרת לתבנית „%4$s”."
#: include/classes/import/CConfigurationImport.php:1800
#, c-format
@@ -18101,62 +18105,62 @@ msgstr "אבטיפוס ההקפצה „%1$s” שייך לתבניות עם קי
#: include/classes/api/services/CDiscoveryRule.php:916
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the host \"%3$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להסתמך על ההקפצה „%2$s” שאינה קיימת, במארח „%3$s”."
#: include/classes/api/services/CDiscoveryRule.php:915
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the template \"%3$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להסתמך על ההקפצה „%2$s”, שאינה קיימת, בתבנית „%3$s”."
#: include/classes/api/services/CTriggerGeneral.php:2733
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\" from the host \"%3$s\", because dependencies on triggers from a child template or host are not allowed."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי בהקפצה „%2$s” מהמארח „%3$s”, כיוון שתלויות בהקפצות מתבנית צאצאית או מארח אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2732
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\" from the template \"%3$s\", because dependencies on triggers from a child template or host are not allowed."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי בהקפצה „%2$s” מהתבנית „%3$s”, כיוון שתלויות בהקפצות מתבנית צאצאית או מארח אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2622
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\" from the template \"%3$s\", because dependencies on triggers from the parent template are not allowed."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי בהקפצה „%2$s” מהתבנית „%3$s”, כיוון שתלויות בהקפצות מתבנית הורה אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2473
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\", because dependencies of host triggers on template triggers are not allowed."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי בהקפצה „%2$s” כיוון שתלויות של הקפצות מארח בתבניות הקפצה אסורות."
#: include/classes/api/services/CTriggerGeneral.php:2862
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\", because the template \"%3$s\" is not linked to the host \"%4$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להסתמך על ההקפצה „%2$s”, כיוון שהתבנית „%3$s” לא מקושרת למארח „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2861
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger \"%2$s\", because the template \"%3$s\" is not linked to the template \"%4$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להסתמך על ההקפצה „%2$s”, כיוון שהתבנית „%3$s” לא מקושרת לתבנית „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2355
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger prototype \"%2$s\", because a circular linkage (%3$s) would occur for host \"%4$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי באבטיפוס ההקפצה „%2$s” כיוון שתיווצר קישוריות מעגלית (%3$s) למארח „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2350
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger prototype \"%2$s\", because a circular linkage (%3$s) would occur for template \"%4$s\"."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי באבטיפוס ההקפצה „%2$s” כיוון שתיווצר קישוריות מעגלית (%3$s) לתבנית „%4$s”."
#: include/classes/api/services/CTriggerGeneral.php:2366
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger prototype \"%2$s\", because a circular linkage (%3$s) would occur."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי באבטיפוס ההקפצה „%2$s”, כיוון שתיווצר קישוריות מעגלית (%3$s)."
#: include/classes/api/services/CTriggerGeneral.php:1325
#, c-format
msgid "Trigger prototype \"%1$s\" cannot depend on the trigger prototype \"%2$s\", because dependencies on trigger prototypes from another LLD rule are not allowed."
-msgstr ""
+msgstr "אבטיפוס ההקפצה „%1$s” לא יכול להיות תלוי באבטיפוס ההקפצה „%2$s” כיוון שתלויות באבות טיפוס של הקפצות מכלל LLD (גילוי ברמה נמוכה) אסורות."
#: include/classes/api/services/CTriggerGeneral.php:1716
#, c-format
diff --git a/ui/locale/hu/LC_MESSAGES/frontend.po b/ui/locale/hu/LC_MESSAGES/frontend.po
index 6e3259ee169..f4703cb38f5 100644
--- a/ui/locale/hu/LC_MESSAGES/frontend.po
+++ b/ui/locale/hu/LC_MESSAGES/frontend.po
@@ -12919,6 +12919,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/id/LC_MESSAGES/frontend.po b/ui/locale/id/LC_MESSAGES/frontend.po
index 541b52fd3d0..040a92f5b7a 100644
--- a/ui/locale/id/LC_MESSAGES/frontend.po
+++ b/ui/locale/id/LC_MESSAGES/frontend.po
@@ -12810,6 +12810,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/it/LC_MESSAGES/frontend.po b/ui/locale/it/LC_MESSAGES/frontend.po
index 71004fe97c2..d259c5b6e59 100644
--- a/ui/locale/it/LC_MESSAGES/frontend.po
+++ b/ui/locale/it/LC_MESSAGES/frontend.po
@@ -12916,6 +12916,10 @@ msgstr "Numero di item all'intero del database Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Numero di item all'interno della proxy history che non sono ancora stati inviati al server"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Numero di item all'interno della coda che sono in ritardo da a secondi, inclusi."
diff --git a/ui/locale/ja/LC_MESSAGES/frontend.po b/ui/locale/ja/LC_MESSAGES/frontend.po
index c72959c01d6..c7fb8dedf0d 100644
--- a/ui/locale/ja/LC_MESSAGES/frontend.po
+++ b/ui/locale/ja/LC_MESSAGES/frontend.po
@@ -4,8 +4,8 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-05-27 12:28+0000\n"
-"Last-Translator: Atsushi <kha00760@nifty.com>\n"
+"PO-Revision-Date: 2022-06-06 11:12+0000\n"
+"Last-Translator: Atsushi Tanaka <kha00760@nifty.com>\n"
"Language-Team: Zabbix\n"
"Language: ja\n"
"MIME-Version: 1.0\n"
@@ -2972,12 +2972,12 @@ msgstr "ホスト\"%4$s\"上のトリガー\"%3$s\"の依存関係によって
#: include/classes/api/services/CHostBase.php:414
#, c-format
msgid "Cannot unlink template \"%1$s\" from template \"%2$s\" due to dependency of trigger \"%3$s\" on template \"%4$s\"."
-msgstr ""
+msgstr "テンプレート\"%4$s\"上のトリガー\"%3$s\"の依存関係によって、テンプレート\"%2$s\"からテンプレート\"%1$s\"へのリンクを削除できません。"
#: include/classes/api/services/CHostBase.php:330
#, c-format
msgid "Cannot unlink template \"%1$s\" from template \"%2$s\" due to dependency of trigger \"%3$s\"."
-msgstr ""
+msgstr "トリガー\"%3$s\"の依存関係によって、テンプレート\"%2$s\"からテンプレート\"%1$s\"へのリンクを削除できません。"
#: include/classes/api/services/CHostBase.php:256
#, c-format
@@ -12813,6 +12813,10 @@ msgstr "Zabbixデータベース内のアイテム数。"
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "サーバーへ未送信のプロキシ内のヒストリアイテム数"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "fromからtoまでの時間遅れているキュー内のアイテム数。"
@@ -17376,17 +17380,17 @@ msgstr "クローンしたテンプレートは\"隠しテキスト\"タイプ
#: include/classes/api/services/CTriggerGeneral.php:2245
#, c-format
msgid "The dependency of trigger \"%1$s\" on trigger \"%2$s\" already exists."
-msgstr ""
+msgstr "トリガー\"%2$s\"上のトリガー\"%1$s\"の依存関係がすでに存在します。"
#: include/classes/api/services/CTriggerGeneral.php:2249
#, c-format
msgid "The dependency of trigger prototype \"%1$s\" on trigger \"%2$s\" already exists."
-msgstr ""
+msgstr "トリガー\"%2$s\"上のトリガーのプロトタイプ\"%1$s\"の依存関係がすでに存在します。"
#: include/classes/api/services/CTriggerGeneral.php:2250
#, c-format
msgid "The dependency of trigger prototype \"%1$s\" on trigger prototype \"%2$s\" already exists."
-msgstr ""
+msgstr "トリガーのプロトタイプ\"%2$s\"上のトリガーのプロトタイプ\"%1$s\"の依存関係がすでに存在します。"
#: app/views/administration.geomaps.edit.php:36
msgid "The following placeholders are supported:"
@@ -17819,12 +17823,12 @@ msgstr "トリガー\"%1$s\"はリンクされている他のテンプレート
#: include/triggers.inc.php:372 include/triggers.inc.php:433
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the host \"%3$s\"."
-msgstr ""
+msgstr "ホスト\"%3$s\"上のトリガー\"%1$s\"を存在しないトリガー\"%2$s\"に依存させることができません。"
#: include/triggers.inc.php:373 include/triggers.inc.php:434
#, c-format
msgid "Trigger \"%1$s\" cannot depend on the non-existent trigger \"%2$s\" on the template \"%3$s\"."
-msgstr ""
+msgstr "テンプレート\"%3$s\"上のトリガー\"%1$s\"を存在しないトリガー\"%2$s\"に依存させることができません。"
#: include/classes/api/services/CTriggerGeneral.php:2728
#, c-format
diff --git a/ui/locale/ka/LC_MESSAGES/frontend.po b/ui/locale/ka/LC_MESSAGES/frontend.po
index 97eeb0512d6..de5d792cbbe 100644
--- a/ui/locale/ka/LC_MESSAGES/frontend.po
+++ b/ui/locale/ka/LC_MESSAGES/frontend.po
@@ -12807,6 +12807,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/ko/LC_MESSAGES/frontend.po b/ui/locale/ko/LC_MESSAGES/frontend.po
index 34b6745c200..db8f81bc25e 100644
--- a/ui/locale/ko/LC_MESSAGES/frontend.po
+++ b/ui/locale/ko/LC_MESSAGES/frontend.po
@@ -12807,6 +12807,10 @@ msgstr "Zabbix데이터베이스 안의 아이템수."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "서버에 송신되지 않은 프록시의 히스토리 아이템 수"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "from에서 to까지의 시간이 늦는 큐의 아이템수."
diff --git a/ui/locale/lt/LC_MESSAGES/frontend.po b/ui/locale/lt/LC_MESSAGES/frontend.po
index 0f47ea1e8fb..bf64719df59 100644
--- a/ui/locale/lt/LC_MESSAGES/frontend.po
+++ b/ui/locale/lt/LC_MESSAGES/frontend.po
@@ -13023,6 +13023,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/lv/LC_MESSAGES/frontend.po b/ui/locale/lv/LC_MESSAGES/frontend.po
index 25472efa2a5..819925c1483 100644
--- a/ui/locale/lv/LC_MESSAGES/frontend.po
+++ b/ui/locale/lv/LC_MESSAGES/frontend.po
@@ -13030,6 +13030,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/nb_NO/LC_MESSAGES/frontend.po b/ui/locale/nb_NO/LC_MESSAGES/frontend.po
index 0488156a6b3..7339e8766c5 100644
--- a/ui/locale/nb_NO/LC_MESSAGES/frontend.po
+++ b/ui/locale/nb_NO/LC_MESSAGES/frontend.po
@@ -12923,6 +12923,10 @@ msgstr "Antall elementer i Zabbix-databasen."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Antall elementer i proxy-historikk som ennå ikke er sendt til serveren"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Antall elementer i køen som er forsinket med fra til til sekunder, inkludert."
diff --git a/ui/locale/nl/LC_MESSAGES/frontend.po b/ui/locale/nl/LC_MESSAGES/frontend.po
index 4b8073875a7..39ceccf223d 100644
--- a/ui/locale/nl/LC_MESSAGES/frontend.po
+++ b/ui/locale/nl/LC_MESSAGES/frontend.po
@@ -12922,6 +12922,10 @@ msgstr "Aantal items in Zabbix database."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Aantal items in de wachtrij van de proxy die nog niet zijn verzonden naar de server"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/pl/LC_MESSAGES/frontend.po b/ui/locale/pl/LC_MESSAGES/frontend.po
index b327f0749f3..88f6b9f2d2b 100644
--- a/ui/locale/pl/LC_MESSAGES/frontend.po
+++ b/ui/locale/pl/LC_MESSAGES/frontend.po
@@ -13039,6 +13039,10 @@ msgstr "Liczba pozycji w bazie danych Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Liczba pozycji w historii proxy, które jeszcze nie zostały wysłane do serwera"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Liczba pozycji kolejki, które zostały opóźnione od \"od\" do \"do\" sekund włącznie."
diff --git a/ui/locale/pt_BR/LC_MESSAGES/frontend.po b/ui/locale/pt_BR/LC_MESSAGES/frontend.po
index 32552cff94d..c3b40123940 100644
--- a/ui/locale/pt_BR/LC_MESSAGES/frontend.po
+++ b/ui/locale/pt_BR/LC_MESSAGES/frontend.po
@@ -12927,6 +12927,10 @@ msgstr "Número de itens no banco de dados do Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Número de items no histórico do proxy que ainda não foram enviados para o servidor"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Número de itens na fila que estão atrasados em segundos."
diff --git a/ui/locale/pt_PT/LC_MESSAGES/frontend.po b/ui/locale/pt_PT/LC_MESSAGES/frontend.po
index d6d0ff3d851..e5456c7068e 100644
--- a/ui/locale/pt_PT/LC_MESSAGES/frontend.po
+++ b/ui/locale/pt_PT/LC_MESSAGES/frontend.po
@@ -12916,6 +12916,10 @@ msgstr "Número de itens na base de dados do Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Número de items no histórico do proxy que ainda não foram enviados para o servidor"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Número de itens na fila que estão atrasados em segundos."
diff --git a/ui/locale/ro/LC_MESSAGES/frontend.po b/ui/locale/ro/LC_MESSAGES/frontend.po
index 3e61089c4c4..cb24364dac6 100644
--- a/ui/locale/ro/LC_MESSAGES/frontend.po
+++ b/ui/locale/ro/LC_MESSAGES/frontend.po
@@ -13044,6 +13044,10 @@ msgstr "Număr de itemuri in baza de date Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Număr de itemuri în istoricul proxy care nu au fost încă trimise către server"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Număr de itemi in coadă care sunt întârziați de la pâna la secunde, inclusiv."
diff --git a/ui/locale/ru/LC_MESSAGES/frontend.po b/ui/locale/ru/LC_MESSAGES/frontend.po
index 0b17679740f..36291bfd18f 100644
--- a/ui/locale/ru/LC_MESSAGES/frontend.po
+++ b/ui/locale/ru/LC_MESSAGES/frontend.po
@@ -4,8 +4,8 @@ msgstr ""
"Project-Id-Version: Zabbix 6.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-02 16:36+0200\n"
-"PO-Revision-Date: 2022-04-29 13:42+0000\n"
-"Last-Translator: Alexei Vladishev <alex@zabbix.com>\n"
+"PO-Revision-Date: 2022-06-02 13:08+0000\n"
+"Last-Translator: Marina <marina.generalova@zabbix.com>\n"
"Language-Team: Zabbix translation team\n"
"Language: ru\n"
"MIME-Version: 1.0\n"
@@ -313,7 +313,7 @@ msgstr "(0 - использовать умолчание действия)"
#: include/views/configuration.hostgroups.list.php:36
msgid "(Only super admins can create groups)"
-msgstr "(Только супер администраторы могут создавать группы)"
+msgstr "(Только супер-администраторы могут создавать группы)"
#: app/partials/configuration.host.edit.html.php:275
#: app/views/popup.itemtestedit.view.php:241
@@ -13039,6 +13039,10 @@ msgstr "Количество элементов данных в базе дан
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Количество элементов данных в истории прокси, которые еще не были отправлены на сервер"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Количество элементов данных в очереди, которые задерживаются от и до секунд, включительно."
@@ -13352,11 +13356,11 @@ msgstr "Только услуги без тегов проблем"
#: include/classes/api/services/CDashboard.php:414
msgid "Only super admins can set dashboard owner."
-msgstr "Только супер администраторы могут задавать владельца ПАНЕЛИ."
+msgstr "Только супер-администраторы могут задавать владельца ПАНЕЛИ."
#: include/classes/api/services/CReport.php:334
msgid "Only super admins can set report owner."
-msgstr "Только супер администраторы могут задавать владельца отчета."
+msgstr "Только супер-администраторы могут задавать владельца отчета."
#: app/partials/administration.system.info.php:68
msgid "Only triggers assigned to enabled hosts and depending on enabled items are counted"
diff --git a/ui/locale/sk/LC_MESSAGES/frontend.po b/ui/locale/sk/LC_MESSAGES/frontend.po
index 75b03505544..0e3737cd0e7 100644
--- a/ui/locale/sk/LC_MESSAGES/frontend.po
+++ b/ui/locale/sk/LC_MESSAGES/frontend.po
@@ -13032,6 +13032,10 @@ msgstr "Počet položiek v Zabbix databáze."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Počet položiek v histórii proxy, ktoré ešte neboli odoslané serveru"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Počet položiek vo fronte, ktoré meškajú od do sekúnd vrátane."
diff --git a/ui/locale/sv_SE/LC_MESSAGES/frontend.po b/ui/locale/sv_SE/LC_MESSAGES/frontend.po
index 197dce79d9d..c4b220559a6 100644
--- a/ui/locale/sv_SE/LC_MESSAGES/frontend.po
+++ b/ui/locale/sv_SE/LC_MESSAGES/frontend.po
@@ -12915,6 +12915,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/locale/tr/LC_MESSAGES/frontend.po b/ui/locale/tr/LC_MESSAGES/frontend.po
index d68f6fef988..89a6705bd63 100644
--- a/ui/locale/tr/LC_MESSAGES/frontend.po
+++ b/ui/locale/tr/LC_MESSAGES/frontend.po
@@ -12923,6 +12923,10 @@ msgstr "Zabbix veritabanındaki öğe sayısı."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Henüz sunucuya gönderilmeyen vekil sunucu geçmişindeki öğelerin sayısı"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Kuyrukta, başlangıçtan belli saniye kadar ötelenen öğelerin sayısı, kapsayıcı."
diff --git a/ui/locale/uk/LC_MESSAGES/frontend.po b/ui/locale/uk/LC_MESSAGES/frontend.po
index 9fb8b663966..a916bd178c1 100644
--- a/ui/locale/uk/LC_MESSAGES/frontend.po
+++ b/ui/locale/uk/LC_MESSAGES/frontend.po
@@ -13033,6 +13033,10 @@ msgstr "Кількість елементів даних в базі даних
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Кількість елементів даних в історії проксі, які ще не були відправлені на сервер"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Кількість елементів даних в черзі, які затримується від і до секунд, включно."
diff --git a/ui/locale/vi/LC_MESSAGES/frontend.po b/ui/locale/vi/LC_MESSAGES/frontend.po
index 3af3f102fa4..46631529c61 100644
--- a/ui/locale/vi/LC_MESSAGES/frontend.po
+++ b/ui/locale/vi/LC_MESSAGES/frontend.po
@@ -12810,6 +12810,10 @@ msgstr "Số lượng item trong cơ sở dữ liệu Zabbix."
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "Số lượng itrm trong lịch sử proxy mà chưa được gửi đến máy chủ"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "Số lượng item trong hàng đợi mà đang bị trễ từ giây thứ, bao gồm."
diff --git a/ui/locale/zh_CN/LC_MESSAGES/frontend.po b/ui/locale/zh_CN/LC_MESSAGES/frontend.po
index de89c1d2ea4..fbd72fe39ff 100644
--- a/ui/locale/zh_CN/LC_MESSAGES/frontend.po
+++ b/ui/locale/zh_CN/LC_MESSAGES/frontend.po
@@ -12816,6 +12816,10 @@ msgstr "Zabbix 数据库中的监控项数量"
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr "系统代理程序历史数据中未发送给服务端的监控项数量"
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr "队列中延时秒数的项目的数量,包括."
diff --git a/ui/locale/zh_TW/LC_MESSAGES/frontend.po b/ui/locale/zh_TW/LC_MESSAGES/frontend.po
index ea1849a51ee..51f59eae73b 100644
--- a/ui/locale/zh_TW/LC_MESSAGES/frontend.po
+++ b/ui/locale/zh_TW/LC_MESSAGES/frontend.po
@@ -12814,6 +12814,10 @@ msgstr ""
msgid "Number of items in proxy history that are not yet sent to the server"
msgstr ""
+#: include/classes/data/CItemData.php:1668
+msgid "Number of items in the queue which are delayed by from to seconds, inclusive."
+msgstr ""
+
#: include/classes/data/CItemData.php:1685
msgid "Number of items in the queue which are delayed by from to to seconds, inclusive."
msgstr ""
diff --git a/ui/report2.php b/ui/report2.php
index ce1be9a9fbb..9833b989745 100644
--- a/ui/report2.php
+++ b/ui/report2.php
@@ -462,7 +462,7 @@ else {
]))->setWidth(ZBX_TEXTAREA_MEDIUM_WIDTH)
)
->addRow(
- (new CLabel(_('Hosts'), 'filter_hostid__ms')),
+ (new CLabel(_('Hosts'), 'filter_hostids__ms')),
(new CMultiSelect([
'name' => 'filter_hostids[]',
'object_name' => 'hosts',
diff --git a/ui/tests/api_json/testAPIInfo.php b/ui/tests/api_json/testAPIInfo.php
index 42b6e482837..b1a2870c494 100644
--- a/ui/tests/api_json/testAPIInfo.php
+++ b/ui/tests/api_json/testAPIInfo.php
@@ -36,6 +36,6 @@ class testAPIInfo extends CAPITest {
$this->disableAuthorization();
$result = $this->call('apiinfo.version', []);
- $this->assertSame('6.0.5', $result['result']);
+ $this->assertSame('6.0.6', $result['result']);
}
}
diff --git a/ui/tests/include/CIntegrationTest.php b/ui/tests/include/CIntegrationTest.php
index 3bcc78155f0..3db477301fb 100644
--- a/ui/tests/include/CIntegrationTest.php
+++ b/ui/tests/include/CIntegrationTest.php
@@ -361,6 +361,25 @@ class CIntegrationTest extends CAPITest {
}
/**
+ * Checks absence of pid file after kill.
+ *
+ * @param string $component component name
+ *
+ */
+ private static function checkPidKilled($component) {
+
+ for ($r = 0; $r < self::WAIT_ITERATIONS; $r++) {
+ if (!file_exists(self::getPidPath($component))) {
+ return true;
+ }
+
+ sleep(self::WAIT_ITERATION_DELAY);
+ }
+
+ return false;
+ }
+
+ /**
* Wait for component to stop.
*
* @param string $component component name
@@ -370,12 +389,44 @@ class CIntegrationTest extends CAPITest {
protected static function waitForShutdown($component) {
self::validateComponent($component);
- for ($r = 0; $r < self::WAIT_ITERATIONS; $r++) {
- if (!file_exists(self::getPidPath($component))) {
+ if (self::checkPidKilled($component)) {
+ return;
+ }
+
+ $pid = @file_get_contents(self::getPidPath($component));
+
+ $pids = explode("\n", shell_exec('pgrep -P '.$pid));
+ $pids_count = count($pids);
+ $iterations = 0;
+
+ do {
+ for ($i = count($pids) -1; $i >= 0; $i--) {
+ $child_pid = $pids[$i];
+
+ if (is_numeric($child_pid) && posix_kill($child_pid, 0)) {
+ posix_kill($child_pid, SIGKILL);
+ sleep(10 * self::WAIT_ITERATION_DELAY);
+
+ if (!posix_kill($child_pid, 0)) {
+ break;
+ }
+ }
+ }
+
+ if (self::checkPidKilled($component)) {
return;
}
- sleep(self::WAIT_ITERATION_DELAY);
+ $pids = explode("\n", shell_exec('pgrep -P '.$pid));
+ $iterations++;
+ } while (count($pids) > 0 && $iterations < $pids_count );
+
+ if (is_numeric($pid) && posix_kill($pid, 0)) {
+ posix_kill($pid, SIGKILL);
+
+ if (self::checkPidKilled($component)) {
+ return;
+ }
}
throw new Exception('Failed to wait for component "'.$component.'" to stop.');
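Note: the reworked shutdown path above first waits for the pid file to vanish (checkPidKilled), then enumerates the component's children via pgrep and force-kills any that are still alive before finally SIGKILL-ing the parent. A minimal standalone sketch of that idea, assuming a POSIX host with the posix/pcntl extensions and a pgrep binary available (the helper name killProcessTree is illustrative, not part of the test framework):

<?php

// Hypothetical helper: force-stop a process and its direct children.
// Assumes posix_kill() and the external pgrep utility are available.
function killProcessTree(int $pid, int $waitSeconds = 1): bool {
	// Collect direct children before touching the parent.
	$children = array_filter(explode("\n", (string) shell_exec('pgrep -P '.$pid)), 'is_numeric');

	foreach ($children as $child) {
		if (posix_kill((int) $child, 0)) {		// signal 0 = liveness probe
			posix_kill((int) $child, SIGKILL);	// force-kill the child
		}
	}

	sleep($waitSeconds);

	// Kill the parent if it is still running, then report whether it is gone.
	if (posix_kill($pid, 0)) {
		posix_kill($pid, SIGKILL);
		sleep($waitSeconds);
	}

	return !posix_kill($pid, 0);
}

The real waitForShutdown() additionally re-reads the child list between passes and still throws when the pid file never disappears.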
diff --git a/ui/tests/include/helpers/CDBHelper.php b/ui/tests/include/helpers/CDBHelper.php
index 2ba1a060c1d..59642ba3075 100644
--- a/ui/tests/include/helpers/CDBHelper.php
+++ b/ui/tests/include/helpers/CDBHelper.php
@@ -263,22 +263,50 @@ class CDBHelper {
if ($DB['PASSWORD'] !== '') {
putenv('PGPASSWORD='.$DB['PASSWORD']);
}
- $server = $DB['SERVER'] !== '' ? ' -h'.$DB['SERVER'] : '';
- $db_name = $DB['DATABASE'];
+
+ $cmd = 'pg_dump';
+
+	if ($DB['SERVER'] !== '') {
+ $cmd .= ' --host='.$DB['SERVER'];
+ }
+
+ if ($DB['PORT'] !== '' && $DB['PORT'] != 0) {
+ $cmd .= ' --port='.$DB['PORT'];
+ }
+
$file = PHPUNIT_COMPONENT_DIR.$DB['DATABASE'].$suffix.'.dump';
+ $cmd .= ' --username='.$DB['USER'].' --format=d --jobs=5 --dbname='.$DB['DATABASE'];
+ $cmd .= ' --table='.implode(' --table=', $tables).' --file='.$file;
- exec('pg_dump'.$server.' -U'.$DB['USER'].' -Fd -j5 -t'.implode(' -t', $tables).' '.$db_name.' -f'.$file,
- $output, $result_code
- );
+ exec($cmd, $output, $result_code);
if ($result_code != 0) {
throw new Exception('Failed to backup "'.implode('", "', $top_tables).'".');
}
}
else {
- foreach ($tables as $table) {
- DBexecute('DROP TABLE IF EXISTS '.$table.$suffix);
- DBexecute('CREATE TABLE '.$table.$suffix.' AS SELECT * FROM '.$table);
+ if ($DB['PASSWORD'] !== '') {
+ putenv('MYSQL_PWD='.$DB['PASSWORD']);
+ }
+
+ $cmd = 'mysqldump';
+
+	if ($DB['SERVER'] !== '') {
+ $cmd .= ' --host='.$DB['SERVER'];
+ }
+
+ if ($DB['PORT'] !== '' && $DB['PORT'] != 0) {
+ $cmd .= ' --port='.$DB['PORT'];
+ }
+
+ $file = PHPUNIT_COMPONENT_DIR.$DB['DATABASE'].$suffix.'.dump.gz';
+ $cmd .= ' --user='.$DB['USER'].' --add-drop-table '.$DB['DATABASE'];
+ $cmd .= ' '.implode(' ', $tables).' | gzip -c > '.$file;
+
+ exec($cmd, $output, $result_code);
+
+ if ($result_code != 0) {
+ throw new Exception('Failed to backup "'.implode('", "', $top_tables).'".');
}
}
}
@@ -301,39 +329,75 @@ class CDBHelper {
if ($DB['PASSWORD'] !== '') {
putenv('PGPASSWORD='.$DB['PASSWORD']);
}
- $server = $DB['SERVER'] !== '' ? ' -h'.$DB['SERVER'] : '';
- $db_name = $DB['DATABASE'];
+
+ $cmd = 'pg_restore';
+
+	if ($DB['SERVER'] !== '') {
+ $cmd .= ' --host='.$DB['SERVER'];
+ }
+
+ if ($DB['PORT'] !== '' && $DB['PORT'] != 0) {
+ $cmd .= ' --port='.$DB['PORT'];
+ }
+
$file = PHPUNIT_COMPONENT_DIR.$DB['DATABASE'].$suffix.'.dump';
+ $cmd .= ' --username='.$DB['USER'].' --format=d --jobs=5 --clean --dbname='.$DB['DATABASE'];
+ $cmd .= ' '.$file;
- exec('pg_restore'.$server.' -U'.$DB['USER'].' -Fd -j5 --clean -d '.$db_name.' '.$file, $output,
- $result_code
- );
+ exec($cmd, $output, $result_code);
if ($result_code != 0) {
throw new Exception('Failed to restore "'.$file.'".');
}
- exec('rm -rf '.$file);
+ if (strstr(strtolower(PHP_OS), 'win') !== false) {
+ $file = str_replace('/', '\\', $file);
+ exec('rd '.$file.' /q /s');
+ }
+ else {
+ exec('rm -rf '.$file, $output, $result_code);
+ }
if ($result_code != 0) {
throw new Exception('Failed to remove "'.$file.'".');
}
}
else {
- $result = DBselect('SELECT @@unique_checks,@@foreign_key_checks');
- $row = DBfetch($result);
- DBexecute('SET unique_checks=0,foreign_key_checks=0');
+ if ($DB['PASSWORD'] !== '') {
+ putenv('MYSQL_PWD='.$DB['PASSWORD']);
+ }
+
+ $cmd = 'mysql';
+
+	if ($DB['SERVER'] !== '') {
+ $cmd .= ' --host='.$DB['SERVER'];
+ }
- foreach (array_reverse($tables) as $table) {
- DBexecute('DELETE FROM '.$table);
+ if ($DB['PORT'] !== '' && $DB['PORT'] != 0) {
+ $cmd .= ' --port='.$DB['PORT'];
}
- foreach ($tables as $table) {
- DBexecute('INSERT INTO '.$table.' SELECT * FROM '.$table.$suffix);
- DBexecute('DROP TABLE '.$table.$suffix);
+ $file = PHPUNIT_COMPONENT_DIR.$DB['DATABASE'].$suffix.'.dump.gz';
+ $cmd .= ' --user='.$DB['USER'].' '.$DB['DATABASE'];
+ $cmd = 'gzip -cd '.$file.' | '.$cmd;
+
+ exec($cmd, $output, $result_code);
+
+ if ($result_code != 0) {
+ throw new Exception('Failed to restore "'.$file.'".');
+ }
+
+ if (strstr(strtolower(PHP_OS), 'win') !== false) {
+ $file = str_replace('/', '\\', $file);
+ exec('del '.$file);
+ }
+ else {
+ exec('rm -rf '.$file, $output, $result_code);
}
- DBexecute('SET foreign_key_checks='.$row['@@foreign_key_checks'].',unique_checks='.$row['@@unique_checks']);
+ if ($result_code != 0) {
+ throw new Exception('Failed to remove "'.$file.'".');
+ }
}
}
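Note: both the PostgreSQL and MySQL branches above now assemble the dump/restore commands from explicit long options (--host, --port, --user) and pass the password through the environment (PGPASSWORD / MYSQL_PWD) instead of the command line. A rough standalone sketch of the PostgreSQL command construction under the same assumptions ($DB mirrors the frontend configuration array; the function name is illustrative):

<?php

// Hypothetical sketch: build a pg_dump command line from a Zabbix-style $DB config array.
function buildPgDumpCommand(array $DB, array $tables, string $file): string {
	$cmd = 'pg_dump';

	if ($DB['SERVER'] !== '') {
		$cmd .= ' --host='.$DB['SERVER'];
	}
	if ($DB['PORT'] !== '' && $DB['PORT'] != 0) {
		$cmd .= ' --port='.$DB['PORT'];
	}

	// Directory format with five parallel jobs and one --table option per table.
	$cmd .= ' --username='.$DB['USER'].' --format=d --jobs=5 --dbname='.$DB['DATABASE'];
	$cmd .= ' --table='.implode(' --table=', $tables).' --file='.$file;

	return $cmd;
}

// Usage (password is taken from the environment, as in the helper):
// putenv('PGPASSWORD=secret');
// exec(buildPgDumpCommand($DB, ['hosts', 'items'], '/tmp/zabbix.dump'), $output, $result_code);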
diff --git a/ui/tests/include/web/CElement.php b/ui/tests/include/web/CElement.php
index 1fd95f14912..637263bd8e2 100644
--- a/ui/tests/include/web/CElement.php
+++ b/ui/tests/include/web/CElement.php
@@ -740,4 +740,22 @@ class CElement extends CBaseElement implements IWaitable {
public function scrollToTop() {
CElementQuery::getDriver()->executeScript('arguments[0].scrollTo(0, 0)', [$this]);
}
+
+ /**
+ * Check presence of the class(es).
+ *
+ * @param string|array $class class or classes to be present.
+ *
+ * @return boolean
+ */
+ function hasClass($class) {
+ $attribute = parent::getAttribute('class');
+ $classes = ($attribute !== null) ? explode(' ', $attribute) : [];
+
+ if (!is_array($class)) {
+ $class = [$class];
+ }
+
+ return (count(array_diff($class, $classes)) === 0);
+ }
}
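The new hasClass() helper normalizes its argument to an array and returns true only when every requested class is present in the element's class attribute. A hypothetical usage fragment inside a CWebTest test method (the selector and class names are made up):

// Hypothetical usage; the query selector and class names are illustrative only.
$row = $this->query('xpath://table[@class="list-table"]//tbody/tr[1]')->one();

// Single class check.
if ($row->hasClass('disabled')) {
	// the row is rendered as disabled
}

// Multiple classes: true only when every listed class is present on the element.
$this->assertTrue($row->hasClass(['row', 'selected']));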
diff --git a/ui/tests/integration/testAgentItems.php b/ui/tests/integration/testAgentItems.php
index 7b08086f8ec..846dcac65e8 100644
--- a/ui/tests/integration/testAgentItems.php
+++ b/ui/tests/integration/testAgentItems.php
@@ -37,6 +37,7 @@ class testAgentItems extends CIntegrationTest {
const TEST_LINK_BASE_NAME = 'test_link';
const TEST_FILE_NAME = '/tmp/'.self::TEST_FILE_BASE_NAME;
const TEST_LINK_NAME = '/tmp/'.self::TEST_LINK_BASE_NAME;
+ const TEST_LINK_NAME2 = '/tmp/'.self::TEST_LINK_BASE_NAME.'2';
const TEST_DIR_NAME = '/tmp/dir';
const TEST_DIR1_NAME = 'dir1';
const TEST_DIR_DIR1_NAME = self::TEST_DIR_NAME.'/'.self::TEST_DIR1_NAME;
@@ -269,7 +270,7 @@ class testAgentItems extends CIntegrationTest {
]
],
[
- 'key' => 'vfs.file.get['.self::TEST_LINK_NAME.']',
+ 'key' => 'vfs.file.get['.self::TEST_LINK_NAME2.']',
'type' => ITEM_TYPE_ZABBIX,
'component' => self::COMPONENT_AGENT2,
'valueType' => ITEM_VALUE_TYPE_TEXT,
@@ -277,19 +278,19 @@ class testAgentItems extends CIntegrationTest {
'fields_exec' => ['permissions', 'user', 'group', 'uid', 'gid', 'access', 'change'],
'result' => [
'type' => 'sym',
- 'permissions' => 'stat -c %04a '.self::TEST_LINK_NAME,
- 'user' => 'stat -c %U '.self::TEST_LINK_NAME,
- 'group' => 'stat -c %G '.self::TEST_LINK_NAME,
- 'uid' => 'stat -c %u '.self::TEST_LINK_NAME,
- 'gid' => 'stat -c %g '.self::TEST_LINK_NAME,
+ 'permissions' => 'stat -c %04a '.self::TEST_LINK_NAME2,
+ 'user' => 'stat -c %U '.self::TEST_LINK_NAME2,
+ 'group' => 'stat -c %G '.self::TEST_LINK_NAME2,
+ 'uid' => 'stat -c %u '.self::TEST_LINK_NAME2,
+ 'gid' => 'stat -c %g '.self::TEST_LINK_NAME2,
'size' => 14,
'time' => [
'modify' => '2021-03-29T14:59:09+03:00'
],
'timestamp' => [
- 'access' => 'stat -c %X '.self::TEST_LINK_NAME,
+ 'access' => 'stat -c %X '.self::TEST_LINK_NAME2,
'modify' => self::TEST_MOD_TIMESTAMP,
- 'change' => 'stat -c %Z '.self::TEST_LINK_NAME
+ 'change' => 'stat -c %Z '.self::TEST_LINK_NAME2
]
]
],
@@ -577,11 +578,15 @@ class testAgentItems extends CIntegrationTest {
$this->assertTrue(@file_put_contents(self::TEST_DIR_FILE_NAME, "1st line\n2nd line\n3rd line\n") !== false);
$this->assertTrue(@touch(self::TEST_DIR_FILE_NAME, self::TEST_MOD_TIMESTAMP));
- // Write test symlink
+ // Write test symlinks
if (!file_exists(self::TEST_LINK_NAME)) {
$this->assertTrue(@symlink(self::TEST_FILE_NAME, self::TEST_LINK_NAME));
}
$this->assertTrue(@exec('touch -h -a -m -t 202103291459.09 '.self::TEST_LINK_NAME) !== false);
+ if (!file_exists(self::TEST_LINK_NAME2)) {
+ $this->assertTrue(@symlink(self::TEST_FILE_NAME, self::TEST_LINK_NAME2));
+ }
+ $this->assertTrue(@exec('touch -h -a -m -t 202103291459.09 '.self::TEST_LINK_NAME2) !== false);
if (!file_exists(self::TEST_DIR_LINK_NAME)) {
$this->assertTrue(@symlink(self::TEST_DIR_FILE_NAME, self::TEST_DIR_LINK_NAME));
}
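The test now prepares a second symlink (TEST_LINK_NAME2) so the agent and agent2 items each stat their own link, and both links get their access/modification time pinned with touch -h, which operates on the link itself rather than its target. A tiny standalone sketch of that preparation step (the paths are illustrative):

<?php

// Hypothetical setup sketch: create a symlink and pin its own timestamps
// (touch -h changes the link, not the file it points to).
$target = '/tmp/test_file';
$link = '/tmp/test_link_example';

if (!file_exists($link)) {
	symlink($target, $link);
}

// 2021-03-29 14:59:09 in the touch -t format CCYYMMDDhhmm.ss.
exec('touch -h -a -m -t 202103291459.09 '.escapeshellarg($link), $output, $result_code);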
diff --git a/ui/tests/integration/testGoAgentDataCollection.php b/ui/tests/integration/testGoAgentDataCollection.php
index 6ab9c14306c..907ecb321e3 100644
--- a/ui/tests/integration/testGoAgentDataCollection.php
+++ b/ui/tests/integration/testGoAgentDataCollection.php
@@ -29,7 +29,7 @@ class testGoAgentDataCollection extends CIntegrationTest {
const COMPARE_AVERAGE = 0;
const COMPARE_LAST = 1;
- const OFFSET_MAX = 10;
+ const OFFSET_MAX = 20;
private static $hostids = [];
private static $itemids = [];
@@ -448,7 +448,7 @@ class testGoAgentDataCollection extends CIntegrationTest {
}
// Delay to ensure that all metrics were collected.
- sleep(90);
+ sleep(110);
}
/**
@@ -553,9 +553,10 @@ class testGoAgentDataCollection extends CIntegrationTest {
case ITEM_VALUE_TYPE_FLOAT:
case ITEM_VALUE_TYPE_UINT64:
+ $diff_values = [];
+
if (CTestArrayHelper::get($item, 'compareType', self::COMPARE_LAST) === self::COMPARE_AVERAGE) {
$value = [];
- $diff_values = [];
foreach ([self::COMPONENT_AGENT, self::COMPONENT_AGENT2] as $component) {
// Calculate offset between Agent and Agent2 result arrays
@@ -579,19 +580,27 @@ class testGoAgentDataCollection extends CIntegrationTest {
for ($i = 0; $i < self::OFFSET_MAX; $i++) {
$a = $value[self::COMPONENT_AGENT][$i];
$b = $value[self::COMPONENT_AGENT2][$i];
- $diff_values[$i] = abs(abs($a) - abs($b));
+ $diff_values[$i] = abs($a - $b);
}
$offset = array_search(min($diff_values), $diff_values);
$a = $value[self::COMPONENT_AGENT][$offset];
$b = $value[self::COMPONENT_AGENT2][$offset];
+
+ $diff = abs($a - $b);
}
else {
- $a = end($values[self::COMPONENT_AGENT]);
- $b = end($values[self::COMPONENT_AGENT2]);
+ $records = count($values[self::COMPONENT_AGENT]);
+ for ($i = 0; $i < self::OFFSET_MAX; $i++) {
+ $slice = array_slice($values[self::COMPONENT_AGENT], 0, $records - $i);
+ $a = end($slice);
+ $b = end($values[self::COMPONENT_AGENT2]);
+ $diff_values[$i] = abs($a - $b);
+ }
+
+ $diff = min($diff_values);
}
- $diff = abs(abs($a) - abs($b));
$this->assertTrue($diff < $item['threshold'], 'Difference for '.$item['key'].
' is more than defined threshold '.$diff.' > '.$item['threshold']
);
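The reworked numeric comparison above no longer takes abs() of each sample before diffing and, for last-value items, slides back through up to OFFSET_MAX of the first agent's samples to find the closest match to agent 2's last value, which tolerates the two agents collecting at slightly different moments. A small standalone sketch of that sliding-tail comparison (function and variable names are illustrative):

<?php

// Hypothetical sketch: smallest |a - b| between agent2's last sample and the last
// $offsetMax samples reported by agent, mirroring the updated comparison logic.
function minTailDifference(array $agentValues, array $agent2Values, int $offsetMax = 20): float {
	$records = count($agentValues);
	$b = end($agent2Values);
	$diffs = [];

	for ($i = 0; $i < $offsetMax && $i < $records; $i++) {
		$slice = array_slice($agentValues, 0, $records - $i);
		$a = end($slice);
		$diffs[] = abs($a - $b);
	}

	return min($diffs);
}

// $diff = minTailDifference($values['agent'], $values['agent2']);
// $this->assertTrue($diff < $item['threshold']);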
diff --git a/ui/tests/integration/testItemState.php b/ui/tests/integration/testItemState.php
index 7b9ef9c0cc8..25c70bf4d49 100644
--- a/ui/tests/integration/testItemState.php
+++ b/ui/tests/integration/testItemState.php
@@ -137,11 +137,12 @@ class testItemState extends CIntegrationTest {
return [
self::COMPONENT_SERVER => [
'DebugLevel' => 4,
- 'LogFileSize' => 20
+ 'LogFileSize' => 20,
+ 'ListenPort' => self::getConfigurationValue(self::COMPONENT_SERVER, 'ListenPort', 10051)
],
self::COMPONENT_AGENT => [
'Hostname' => 'test_host',
- 'ServerActive' => '127.0.0.1:'.self::getConfigurationValue(self::COMPONENT_SERVER, 'ListenPort'),
+ 'ServerActive' => '127.0.0.1:'.self::getConfigurationValue(self::COMPONENT_SERVER, 'ListenPort', 10051),
'RefreshActiveChecks' => self::REFRESH_ACT_CHKS_INTERVAL,
'BufferSend' => 1
]
diff --git a/ui/tests/selenium/testFormSetup.php b/ui/tests/selenium/testFormSetup.php
index e8023496e8d..147f4fe055b 100644
--- a/ui/tests/selenium/testFormSetup.php
+++ b/ui/tests/selenium/testFormSetup.php
@@ -326,7 +326,7 @@ class testFormSetup extends CWebTest {
// Check screenshot of the Pre-installation summary section.
$skip_fields = [];
- foreach(['Database server', 'Database name'] as $skip_field) {
+ foreach(['Database server', 'Database port', 'Database name'] as $skip_field) {
$xpath = 'xpath://span[text()='.CXPathHelper::escapeQuotes($skip_field).']/../../div[@class="table-forms-td-right"]';
$skip_fields[] = $this->query($xpath)->one();
}
diff --git a/ui/tests/selenium/testPageAdministrationGeneralModules.php b/ui/tests/selenium/testPageAdministrationGeneralModules.php
index e81b8a32477..fd504126a4e 100644
--- a/ui/tests/selenium/testPageAdministrationGeneralModules.php
+++ b/ui/tests/selenium/testPageAdministrationGeneralModules.php
@@ -582,7 +582,7 @@ class testPageAdministrationGeneralModules extends CWebTest {
if (CTestArrayHelper::get($entry, 'check_disabled', true)) {
$this->page->open('zabbix.php?action='.$entry['action'])->waitUntilReady();
$message = CMessageElement::find()->one();
- $this->assertStringContainsString('Class not found for action '.$entry['action'], $message->getText());
+ $this->assertStringContainsString('Class not found', $message->getText());
$this->page->open('zabbix.php?action=module.list');
}
}
diff --git a/ui/tests/selenium/testUrlParameters.php b/ui/tests/selenium/testUrlParameters.php
index a084df06eef..c2df4a4ae07 100644
--- a/ui/tests/selenium/testUrlParameters.php
+++ b/ui/tests/selenium/testUrlParameters.php
@@ -803,9 +803,7 @@ class testUrlParameters extends CLegacyWebTest {
],
[
'url' => 'hostinventoriesoverview.php?filter_groups%5B%5D=9999999&filter_groupby=&filter_set=1',
- 'text_present' => [
- 'No permissions to referred object or it does not exist!'
- ]
+ 'text_present' => 'Host inventory overview'
],
[
'url' => 'hostinventoriesoverview.php',
diff --git a/ui/tests/templates/zabbix.conf.php b/ui/tests/templates/zabbix.conf.php
index 23eb8a125a5..75ee9c84db3 100644
--- a/ui/tests/templates/zabbix.conf.php
+++ b/ui/tests/templates/zabbix.conf.php
@@ -4,7 +4,7 @@ global $DB;
$DB['TYPE'] = '{DBTYPE}';
$DB['SERVER'] = '{DBHOST}';
-$DB['PORT'] = '0';
+$DB['PORT'] = '{DBPORT}';
$DB['DATABASE'] = '{DBNAME}';
$DB['USER'] = '{DBUSER}';
$DB['PASSWORD'] = '{DBPASSWORD}';