#!/bin/bash
#
## ZFS health check script for monit.
## v1.0.2
#
## Should be compatible with FreeBSD and Linux. Tested on Ubuntu.
## To use it on FreeBSD, go to the Scrub Expired and Trim Expired sections:
## in the Scrub Expired section comment out the two Ubuntu date lines and
## uncomment the two FreeBSD lines; in the Trim Expired section adjust the
## date format directly in the for loop's awk parameter.
#
## Assumed usage in monitrc (where 80 is the max capacity in percent
## and 691200 is the scrub and trim expiration in seconds):
## check program zfs_health with path "/path/to/this/script 80 691200"
## if status != 0 then alert
#
## Scrub and trim share the same expiration threshold for backward-compatibility reasons.
#
## Original script from:
## Calomel.org
## https://calomel.org/zfs_health_check_script.html
## FreeBSD ZFS Health Check script
## zfs_health.sh @ Version 0.17
#
## The main difference from the original script is that this one exits
## with a return code instead of sending an e-mail.

# Parameters

maxCapacity=$1 # in percent
scrubExpire=$2 # in seconds (691200 = 8 days)
trimExpire=$2  # in seconds (691200 = 8 days); intentionally shares $2, see note above

usage="Usage: $0 maxCapacityInPercentages scrubExpireInSeconds\n"

if [ ! "${maxCapacity}" ]; then
  printf "Missing arguments\n"
  printf "%b" "${usage}"
  exit 1
fi

if [ ! "${scrubExpire}" ]; then
  printf "Missing second argument\n"
  printf "%b" "${usage}"
  exit 1
fi
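
# Example invocation (the script name is illustrative), matching the monitrc
# example above:
#   ./zfs_health.sh 80 691200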


# Output for monit user interface

printf "==== ZPOOL STATUS ====\n"
printf "%s\n" "$(/sbin/zpool status)"
printf "\n\n==== ZPOOL LIST ====\n"
printf "%s\n" "$(/sbin/zpool list)"


# Health - Check if all zfs volumes are in good condition. We are looking for
# any keyword signifying a degraded or broken array.

condition=$(/sbin/zpool status | grep -E 'DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED|FAIL|DESTROYED|corrupt|cannot|unrecover')

if [ "${condition}" ]; then
  printf "\n==== ERROR ====\n"
  printf "One of the pools is in one of these states: DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED|FAIL|DESTROYED|corrupt|cannot|unrecover!\n"
  printf "%s\n" "${condition}"
  exit 1
fi


# Capacity - Make sure the pool capacity is below 80% for best performance. The
# percentage really depends on how large your volume is. If you have a 128GB
# SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
# probably set the warning closer to 95%.
#
# ZFS uses a copy-on-write scheme. The file system writes new data to
# sequential free blocks first, and once the uberblock has been updated the new
# inode pointers become valid. This holds only while the pool has enough free
# sequential blocks. If the pool is at capacity and space-limited, ZFS has to
# write blocks randomly, which means it cannot create an optimal set of
# sequential writes, and write performance is severely impacted.

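# "/sbin/zpool list -H -o capacity" prints one value per pool, e.g. "42%"
# (sample value is illustrative); cut strips the trailing percent sign so the
# loop below compares plain integers.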
capacity=$(/sbin/zpool list -H -o capacity | cut -d'%' -f1)

for line in ${capacity}
do
  if [ "$line" -ge "$maxCapacity" ]; then
    printf "\n==== ERROR ====\n"
    printf "One of the pools has reached its max capacity!\n"
    exit 1
  fi
done


# Errors - Check the columns for READ, WRITE and CKSUM (checksum) drive errors
# on all volumes and all drives using "zpool status". If any non-zero errors
# are reported, this script exits with an error; you should then look to
# replace the faulty drive and run "zpool scrub" on the affected volume after
# resilvering.

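# Device lines in "zpool status" carry the READ, WRITE and CKSUM counters in
# columns 3-5; awk concatenates them and "grep -v 000" drops the rows where
# all three counters are zero.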
errors=$(/sbin/zpool status | grep ONLINE | grep -v state | awk '{print $3 $4 $5}' | grep -v 000)

if [ "${errors}" ]; then
  printf "\n==== ERROR ====\n"
  printf "One of the pools contains errors!\n"
  printf "%s\n" "${errors}"
  exit 1
fi


# Scrub Expired - Check if all volumes have been scrubbed in at least the last
# 8 days. The general guide is to scrub volumes on desktop-quality drives once
# a week and volumes on enterprise-class drives once a month. You can always
# use cron to schedule "zpool scrub" in off hours. We scrub our volumes every
# Sunday morning, for example.
#
# Check your /etc/cron.d/zfsutils_linux for any already scheduled jobs.
#
# Scrubbing traverses all the data in the pool once and verifies that all
# blocks can be read. Scrubbing proceeds as fast as the devices allow, though
# the priority of any I/O remains below that of normal calls. This operation
# might negatively impact performance, but the file system will remain usable
# and responsive while scrubbing occurs. To initiate an explicit scrub, use the
# "zpool scrub" command.
#
# The scrubExpire variable is in seconds.
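#
# A hypothetical /etc/cron.d entry for a weekly Sunday-morning scrub (the pool
# name "tank" is only an example):
# 0 2 * * 0 root /sbin/zpool scrub tank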

currentDate=$(date +%s)
zfsVolumes=$(/sbin/zpool list -H -o name)

for volume in ${zfsVolumes}
do
  if [ "$(/sbin/zpool status "$volume" | grep -E -c 'none requested')" -ge 1 ]; then
    printf "\n==== ERROR ====\n"
    printf "ERROR: You need to run \"zpool scrub %s\" before this script can monitor the scrub expiration time.\n" "$volume"
    break
  fi

  if [ "$(/sbin/zpool status "$volume" | grep -E -c 'scrub in progress|resilver')" -ge 1 ]; then
    break
  fi

  ### Ubuntu with GNU supported date format - compatible with ZFS v2.0.3 output
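  # A completed scrub line from "zpool status" on ZFS 2.0.x typically looks
  # like this (sample values are illustrative):
  #   scan: scrub repaired 0B in 00:00:10 with 0 errors on Sun Feb 14 10:00:00 2021
  # so awk fields 11-15 carry the completion timestamp.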
  scrubRawDate=$(/sbin/zpool status "$volume" | grep scrub | awk '{print $11" "$12" "$13" "$14" "$15}')
  scrubDate=$(date -d "$scrubRawDate" +%s)

  ### FreeBSD with *nix supported date format
  #scrubRawDate=$(/sbin/zpool status "$volume" | grep scrub | awk '{print $15 $12 $13}')
  #scrubDate=$(date -j -f '%Y%b%e-%H%M%S' "$scrubRawDate"'-000000' +%s)

  if [ $((currentDate - scrubDate)) -ge "$scrubExpire" ]; then
    printf "\n==== ERROR ====\n"
    printf "%s's scrub date is too far in the past!\n" "$volume"
    exit 1
  fi
done

# TRIM Expired - Check if all volumes have been trimmed in at least the last
# 8 days. The general guide is to manually trim volumes on desktop-quality
# drives once a week and volumes on enterprise-class drives once a month. You
# can always use cron to schedule "zpool trim" in off hours. We trim our
# volumes every Sunday morning, for example.
#
# Check your /etc/cron.d/zfsutils_linux for any already scheduled jobs.
#
# Manual trimming is recommended even if the autotrim feature is turned on for
# your pool. From the ZFS documentation:
# > Since the automatic TRIM will skip ranges it considers too small there is value in occasionally
# > running a full zpool trim. This may occur when the freed blocks are small and not enough time
# > was allowed to aggregate them. An automatic TRIM and a manual zpool trim may be run concurrently,
# > in which case the automatic TRIM will yield to the manual TRIM.
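#
# A hypothetical /etc/cron.d entry for a monthly manual trim ("tank" again is
# only an example pool name):
# 0 3 1 * * root /sbin/zpool trim tank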

for volume in ${zfsVolumes}
do
  if [ "$(/sbin/zpool status -t "$volume" | grep -E -c 'trim unsupported')" -ge 1 ]; then
    break
  fi

  ### Ubuntu with GNU supported date format - compatible with ZFS v2.0.3 output.
  ### For other systems and versions adjust the awk parameter below.
  trimRawDates=$(/sbin/zpool status -t "$volume" | grep trim | awk '{print $10" "$11" "$12" "$13" "$14}')

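  # "zpool status -t" reports a trim timestamp per device, so trimRawDates may
  # hold several lines; check every one of them.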
  while IFS= read -r trimRawDate
  do
    trimDate=$(date -d "$trimRawDate" +%s)

    if [ $((currentDate - trimDate)) -ge "$trimExpire" ]; then
      printf "\n==== ERROR ====\n"
      printf "%s's trim date is too far in the past!\n" "$volume"
      exit 1
    fi
  done <<< "$trimRawDates"
done


# Finish - If we made it here then everything is fine
exit 0