Zabbix auto-discovery: monitoring hard disk read/write (disk I/O)

Straight to the configuration:

1. Agent configuration file

cat userparameter_harddisk.conf

#discovery hard disk
UserParameter=custom.vfs.discovery.diskname,/opt/app/zabbix-agent/scripts/check_harddisk.sh diskname_discovery
#disk status
# See https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
# reads completed successfully
UserParameter=custom.vfs.dev.read.ops[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$4}'
# sectors read
UserParameter=custom.vfs.dev.read.sectors[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$6}'
# time spent reading (ms)
UserParameter=custom.vfs.dev.read.ms[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$7}'
# writes completed
UserParameter=custom.vfs.dev.write.ops[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$8}'
# sectors written
UserParameter=custom.vfs.dev.write.sectors[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$10}'
# time spent writing (ms)
UserParameter=custom.vfs.dev.write.ms[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$11}'
# I/Os currently in progress
UserParameter=custom.vfs.dev.io.active[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$12}'
# time spent doing I/Os (ms)
UserParameter=custom.vfs.dev.io.ms[*],egrep "$1" /proc/diskstats | head -1 | awk '{print $$13}'
# iostat %util (average of the last 10 samples in the log)
UserParameter=iostat.util[*],grep "$1" /tmp/.iostat.log | tail -10 | awk '{sum+=$NF}END{print sum/NR}'
# iostat await (column 10 of `iostat -x -m`; the column position may vary with the sysstat version)
UserParameter=iostat.await[*],grep "$1" /tmp/.iostat.log | tail -10 | awk '{sum+=$$10}END{print sum/NR}'
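
In a UserParameter command, Zabbix substitutes $1-$9 with the item key parameters and turns $$ into a literal $, which is why the awk column references above are written as $$4, $$10 and so on (the original await line used a bare $10, which Zabbix would mangle into "parameter 1 followed by 0"). Once the agent has been reloaded you can test a key by hand; sda is just an example device name, and the binary paths below are assumptions based on the install prefix used in this article:

# test an item key locally on the agent host
/opt/app/zabbix-agent/sbin/zabbix_agentd -t 'custom.vfs.dev.read.ops[sda]'

# or query a running agent from the Zabbix server (replace <agent-ip>)
zabbix_get -s <agent-ip> -p 10050 -k 'custom.vfs.dev.read.ops[sda]'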

cat /opt/app/zabbix-agent/scripts/check_harddisk.sh

#!/bin/bash
# function: discover hard disk devices for zabbix low-level discovery
# License: GPL
# mail: [email protected]
# version: 1.0  date: 2013-02-04

diskname_discovery () {
    # collect three-letter block device names (e.g. sda, vdb) from /proc/diskstats;
    # partitions such as sda1 do not match the \b[a-z][a-z][a-z]\b pattern
    HardDisk=($(grep '\b[a-z][a-z][a-z]\b' /proc/diskstats | awk '{print $3}'))
    [ "${HardDisk[0]}" == "" ] && exit
    # emit the low-level discovery JSON: {"data":[{"{#DISKNAME}":"..."},...]}
    printf '{\n'
    printf '\t"data":[\n'
    for ((i = 0; i < ${#HardDisk[@]}; ++i)); do
        num=$((${#HardDisk[@]} - 1))
        if [ "$i" != "$num" ]; then
            printf "\t\t{ \n"
            printf "\t\t\t\"{#DISKNAME}\":\"${HardDisk[$i]}\"},\n"
        else
            # the last element also closes the JSON array and object
            printf "\t\t{ \n"
            printf "\t\t\t\"{#DISKNAME}\":\"${HardDisk[$num]}\"}]}\n"
        fi
    done
}

case "$1" in
diskname_discovery)
diskname_discovery
;;
*)
echo "Usage: $0 {diskname_discovery}"
;;
esac
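
Running the discovery action by hand is a quick sanity check: it should print the low-level discovery JSON that the Zabbix server expects. On a host with two disks (sda and sdb here, purely as an example) the output looks like:

$ /opt/app/zabbix-agent/scripts/check_harddisk.sh diskname_discovery
{
        "data":[
                {
                        "{#DISKNAME}":"sda"},
                {
                        "{#DISKNAME}":"sdb"}]}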

2. Crontab setup

#Ansible: zabbix disk
*/3 * * * * /usr/bin/iostat -x -m 2 20 >> /tmp/.iostat.log
#Ansible: zabbix disk log delete
59 23 * * * echo > /tmp/.iostat.log
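
The first job appends 20 iostat samples taken at 2-second intervals (roughly 40 seconds of data) every 3 minutes; the iostat.util and iostat.await items then average the last 10 log lines for the given device, and the second job truncates the log nightly so it never grows unbounded. You can reproduce what the util key returns for a device (sda as an example) with:

grep sda /tmp/.iostat.log | tail -10 | awk '{sum+=$NF} END {print sum/NR}'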

3. Import the template

cat templates.xml

<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>3.4</version>
<date>2018-07-05T05:30:35Z</date>
<groups>
<group>
<name>Template For Base</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template Linux Disk IO</template>
<name>Template Linux Disk IO</name>
<description/>
<groups>
<group>
<name>Template For Base</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<items/>
<discovery_rules>
<discovery_rule>
<name>Linux Disk device discovery</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.discovery.diskname</key>
<delay>3600</delay>
<status>0</status>
<allowed_hosts/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<filter>
<evaltype>0</evaltype>
<formula/>
<conditions>
<condition>
<macro>{#DISKNAME}</macro>
<value/>
<operator>8</operator>
<formulaid>A</formulaid>
</condition>
</conditions>
</filter>
<lifetime>30d</lifetime>
<description>Discovery of disk devices on Linux</description>
<item_prototypes>
<item_prototype>
<name>Disk:{#DISKNAME}:I/O's currently in progress</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.io.active[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>iops</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:time spent doing I/O</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.io.ms[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>ms</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:completed reads per second</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.read.ops[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>Reads/sec</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:completed writes per second</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>custom.vfs.dev.write.ops[{#DISKNAME}]</key>
<delay>120</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>Writes/sec</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing>
<step>
<type>10</type>
<params/>
</step>
</preprocessing>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:iostat await</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>iostat.await[{#DISKNAME}]</key>
<delay>300</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
<item_prototype>
<name>Disk:{#DISKNAME}:iostat %util</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>iostat.util[{#DISKNAME}]</key>
<delay>300</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Linux I/O Stats</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<application_prototypes/>
<master_item_prototype/>
</item_prototype>
</item_prototypes>
<trigger_prototypes>
<trigger_prototype>
<expression>{Template Linux Disk IO:iostat.util[{#DISKNAME}].min(#3)}&gt;85</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} iostat %util &gt; 85</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
<trigger_prototype>
<expression>{Template Linux Disk IO:custom.vfs.dev.read.ops[{#DISKNAME}].min(#3)}&gt;10000</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} read ops is too high (&gt;10000)</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
<trigger_prototype>
<expression>{Template Linux Disk IO:custom.vfs.dev.write.ops[{#DISKNAME}].min(#3)}&gt;10000</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>{HOST.NAME} {#DISKNAME} write ops is too high (&gt;10000)</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger_prototype>
</trigger_prototypes>
<graph_prototypes>
<graph_prototype>
<name>Disk:{#DISKNAME}: I/O's currently in progress</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>0</drawtype>
<color>00C800</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.io.active[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>Disk:{#DISKNAME}: IOPS reads and writes status</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>5</drawtype>
<color>C80000</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.read.ops[{#DISKNAME}]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>0</drawtype>
<color>0000C8</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>custom.vfs.dev.write.ops[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>Disk:{#DISKNAME}: iostat stats</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>0</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>0</drawtype>
<color>C80000</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>iostat.util[{#DISKNAME}]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>0</drawtype>
<color>00C800</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Template Linux Disk IO</host>
<key>iostat.await[{#DISKNAME}]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
</graph_prototypes>
<host_prototypes/>
<jmx_endpoint/>
</discovery_rule>
</discovery_rules>
<httptests/>
<macros/>
<templates/>
<screens/>
</template>
</templates>
</zabbix_export>
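
Import templates.xml in the web UI via Configuration → Templates → Import, or script it through the Zabbix API. Below is a minimal sketch using the configuration.import method; the URL and auth token are placeholders you must replace, and the source field must carry the XML as one JSON-escaped string:

curl -s -H 'Content-Type: application/json-rpc' http://zabbix.example.com/api_jsonrpc.php -d '{
    "jsonrpc": "2.0",
    "method": "configuration.import",
    "params": {
        "format": "xml",
        "rules": {
            "templates": {"createMissing": true, "updateExisting": true},
            "applications": {"createMissing": true},
            "items": {"createMissing": true, "updateExisting": true},
            "discoveryRules": {"createMissing": true, "updateExisting": true},
            "triggers": {"createMissing": true, "updateExisting": true},
            "graphs": {"createMissing": true, "updateExisting": true}
        },
        "source": "...the contents of templates.xml as a JSON-escaped string..."
    },
    "auth": "YOUR_AUTH_TOKEN",
    "id": 1
}'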

4. Restart zabbix-agent
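
On a systemd-based distribution that is typically:

systemctl restart zabbix-agent

# SysV init, or a source install where the service name differs (e.g. zabbix_agentd)
service zabbix_agentd restart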

Original article: https://www.cnblogs.com/Qing-840/p/9267821.html
