Add zfault zpool configurations and tests
[zfs.git] / scripts / common.sh.in
#!/bin/bash
#
# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

# Quote "$0" so paths containing whitespace resolve correctly.
basedir="$(dirname "$0")"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
# No in-tree config; fall back to the installed module stack.
MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
fi
16
# Global option flags and shared state; each test script overrides
# PROG with its own name and sets the remaining flags itself.
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
# Vdev lists populated by the udev_raid*_setup helpers below.
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
# Test selection for run_test: '*' runs everything, otherwise a list
# of test numbers; TESTS_SKIP always wins over TESTS_RUN.
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}
31
32 prefix=@prefix@
33 exec_prefix=@exec_prefix@
34 libexecdir=@libexecdir@
35 pkglibexecdir=${libexecdir}/@PACKAGE@
36 bindir=@bindir@
37 sbindir=@sbindir@
38
39 ETCDIR=${ETCDIR:-/etc}
40 DEVDIR=${DEVDIR:-/dev/disk/zpool}
41 ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
42 ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
43 ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}
44
45 ZDB=${ZDB:-${sbindir}/zdb}
46 ZFS=${ZFS:-${sbindir}/zfs}
47 ZINJECT=${ZINJECT:-${sbindir}/zinject}
48 ZPOOL=${ZPOOL:-${sbindir}/zpool}
49 ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
50 ZTEST=${ZTEST:-${sbindir}/ztest}
51 ZPIOS=${ZPIOS:-${sbindir}/zpios}
52
53 COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
54 ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
55 ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
56 ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
57 ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}
58
59 LDMOD=${LDMOD:-/sbin/modprobe}
60 LSMOD=${LSMOD:-/sbin/lsmod}
61 RMMOD=${RMMOD:-/sbin/rmmod}
62 INFOMOD=${INFOMOD:-/sbin/modinfo}
63 LOSETUP=${LOSETUP:-/sbin/losetup}
64 MDADM=${MDADM:-/sbin/mdadm}
65 PARTED=${PARTED:-/sbin/parted}
66 BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
67 LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
68 SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
69 SYSCTL=${SYSCTL:-/sbin/sysctl}
70 UDEVADM=${UDEVADM:-/sbin/udevadm}
71 AWK=${AWK:-/usr/bin/awk}
72
73 COLOR_BLACK="\033[0;30m"
74 COLOR_DK_GRAY="\033[1;30m"
75 COLOR_BLUE="\033[0;34m"
76 COLOR_LT_BLUE="\033[1;34m" 
77 COLOR_GREEN="\033[0;32m"
78 COLOR_LT_GREEN="\033[1;32m"
79 COLOR_CYAN="\033[0;36m"
80 COLOR_LT_CYAN="\033[1;36m"
81 COLOR_RED="\033[0;31m"
82 COLOR_LT_RED="\033[1;31m"
83 COLOR_PURPLE="\033[0;35m"
84 COLOR_LT_PURPLE="\033[1;35m"
85 COLOR_BROWN="\033[0;33m"
86 COLOR_YELLOW="\033[1;33m"
87 COLOR_LT_GRAY="\033[0;37m"
88 COLOR_WHITE="\033[1;37m"
89 COLOR_RESET="\033[0m"
90
# Print an error message, prefixed with the program name, to stderr
# and terminate the script with status 1.
die() {
	local message="$1"

	printf "%b\n" "${PROG}: ${message}" >&2
	exit 1
}
95
# Print a message, but only when verbose mode is enabled.
msg() {
	# Explicit -n test; the bare '[ ${VERBOSE} ]' form breaks if
	# VERBOSE ever contains whitespace or a test operator.
	if [ -n "${VERBOSE}" ]; then
		echo "$@"
	fi
}
101
# Print a green "Pass" status marker for the current test.
pass() {
	printf "%b\n" "${COLOR_GREEN}Pass${COLOR_RESET}"
}
105
# Print a red "Fail" marker with the error code, then terminate the
# script using that code as the exit status.
fail() {
	local rc=$1

	printf "%b\n" "${COLOR_RED}Fail${COLOR_RESET} (${rc})"
	exit ${rc}
}
110
# Print a brown "Skip" status marker for the current test.
skip() {
	printf "%b\n" "${COLOR_BROWN}Skip${COLOR_RESET}"
}
114
# Dump the SPL kernel debug log into a file in the current directory.
# Triggers the dump via the kernel.spl.debug.dump sysctl, recovers the
# dump file name from the last dmesg line, then decodes it with the
# in-tree spl utility.  Assumes SPLBUILD points at the SPL build tree
# (only set when running in-tree) -- TODO confirm callers guarantee it.
spl_dump_log() {
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	# Dump path is the 5th space-delimited field of the last dmesg
	# line; assumes no unrelated message raced in after the dump.
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}
125
# Verify the ZFS module stack is in a sane state: no stack modules
# currently loaded, and every module binary locatable by modinfo.
# On failure sets ERROR with a human-readable report and returns 1.
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=`basename $MOD .ko`

		# Module currently loaded in the running kernel?
		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		# Fix: the original '[ ${INFOMOD} ${MOD} ... ]' placed the
		# command inside test(1), which never executes it, so
		# missing modules were never detected.  Run modinfo and
		# test its exit status instead.
		if ! ${INFOMOD} ${MOD} &>/dev/null; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}
157
# Load a single kernel module, passing any extra arguments through to
# modprobe.  On failure sets ERROR and returns 1.
load_module() {
	local NAME=`basename $1 .ko`

	if [ -n "${VERBOSE}" ]; then
		echo "Loading ${NAME} ($@)"
	fi

	# Fix: the original 'ERROR=... return 1' one-liner applied the
	# assignment as a temporary prefix to the 'return' builtin, so
	# ERROR was not reliably visible to the caller.  Group the
	# failure handling explicitly.
	${LDMOD} $* || { ERROR="Failed to load $1"; return 1; }

	return 0
}
169
# Load the whole ZFS module stack in MODULES order.  Arguments of the
# form "<module>=<options>" supply per-module option strings which are
# forwarded to load_module for the matching module.
load_modules() {
	mkdir -p /etc/zfs

	for MOD in ${MODULES[*]}; do
		local NAME=$(basename ${MOD} .ko)
		local VALUE=

		# Look for a "<module>=<options>" argument naming this
		# module and capture everything after the first '='.
		for OPT in "$@"; do
			OPT_NAME=$(echo ${OPT} | cut -f1 -d'=')

			if [ ${NAME} = "${OPT_NAME}" ]; then
				VALUE=$(echo ${OPT} | cut -f2- -d'=')
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully loaded ZFS module stack"
	fi

	return 0
}
194
# Unload a single kernel module by name.  On failure sets ERROR and
# returns 1.
unload_module() {
	local NAME=`basename $1 .ko`

	if [ -n "${VERBOSE}" ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	# Fix: same as load_module -- the 'ERROR=... return 1' prefix
	# form did not reliably persist ERROR for the caller.
	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

	return 0
}
206
# Unload the ZFS module stack in reverse dependency order.  Modules
# with a non-zero reference count are silently left loaded.
unload_modules() {
	# Reverse the MODULES list so dependents are removed before the
	# modules they depend on.
	local MODULES_REVERSE=( $(echo ${MODULES[@]} |
		${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

	for MOD in ${MODULES_REVERSE[*]}; do
		local NAME=`basename ${MOD} .ko`
		# Third lsmod column is the module's use count.
		local USE_COUNT=`${LSMOD} |
				egrep "^${NAME} "| ${AWK} '{print $3}'`

		if [ "${USE_COUNT}" = 0 ] ; then

			# Capture the SPL debug log before the spl module
			# (and with it the log buffer) goes away.
			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}
232
#
# Check that the loopback utilities are installed.
#
check_loop_utils() {
	test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}
239
240
#
# Find and return an unused loopback device.
#
unused_loop_device() {
	for DEVICE in `ls -1 /dev/loop* 2>/dev/null`; do
		# 'losetup <dev>' exits non-zero when the device has no
		# backing file attached, i.e. the device is free.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}
255
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Fix: use the losetup awk variable (the configured ${LOSETUP}
	# binary); it was declared with -v but the hard-coded string
	# "losetup" was used instead, ignoring any override.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system(losetup " -d " $1) }' ${TMP_FILE}
	# Third field of 'losetup -a' output is the backing file.
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}
273
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	 ${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
	# NOTE(review): ${FILE} and ${DEVICE} above, and ${FILES} below,
	# are never set in this function -- they presumably come from
	# globals exported by the calling zpool-config scripts; confirm
	# callers define them, otherwise the die message is blank and
	# the rm is a no-op.

	rm -f ${FILES}
	return 0
}
288
#
# Check that the mdadm and parted utilities are installed.
#
check_md_utils() {
	[ -f ${MDADM} ] || die "${MDADM} utility must be installed"
	[ -f ${PARTED} ] || die "${PARTED} utility must be installed"
}
296
# Determine whether the kernel supports partitionable md devices by
# building a loopback-backed md "faulty" array and re-reading its
# partition table.  Returns 0 if blockdev --rereadpt succeeds,
# non-zero otherwise; all temporary devices and files are torn down
# on every exit path.
check_md_partitionable() {
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# Recreate the backing file as a 16 MiB sparse file
	# (count=0 seek=16 writes no data, only extends the size).
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	# Allow udev up to 30 seconds to create the md device node.
	wait_udev ${MDDEVICE} 30

	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	# Teardown in reverse order of construction.
	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}
335
#
# Find and return an unused md device.
#
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip devices listed as active in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# Fix: the existence test used the garbled path
		# '$/dev/{MDDEVICE}', which never exists, so devices
		# already present were wrongly reported as unused.
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device node exists but may not be an assembled array;
		# 'mdadm --detail' exits 1 for an inactive device.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
364
#
# Stop every md device on the system, excluding partition nodes
# (names containing 'p').  This is slightly dangerous because the
# devices may not be ours, but in-use devices cannot be stopped and
# the superblocks are not zeroed, so anything removed by mistake can
# be reconstructed.
#
cleanup_md_devices() {
	local DEVICES
	DEVICES=$(ls /dev/md* 2>/dev/null | grep -v p)

	destroy_md_devices "${DEVICES}"
	udev_trigger
}
376
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"
	for MDDEVICE in ${MDDEVICES}; do
		# Errors are ignored; a device may already be stopped
		# or removed.
		${MDADM} --stop ${MDDEVICE} &>/dev/null
		${MDADM} --remove ${MDDEVICE} &>/dev/null
		# NOTE(review): querying --detail on a just-removed
		# device looks like a settle/flush trick -- confirm it
		# is intentional rather than leftover debugging.
		${MDADM} --detail ${MDDEVICE} &>/dev/null
	done

	return 0
}
393
#
# Check that the scsi_debug module and the lsscsi utility are
# available; abort via die() otherwise.
#
check_sd_utils() {
	${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
	[ -f ${LSSCSI} ] || die "${LSSCSI} utility must be installed"
}
401
#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually.  This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	# Prints the "[H:C:T:L]" id field of the first scsi_debug line.
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Manual fallback: delete the device via sysfs, then ask
		# its host to rescan ("- - -" wildcards channel/target/lun).
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}
422
#
# Trigger udev and wait for it to settle.  Falls back to the legacy
# udevtrigger/udevsettle pair when the udevadm binary is absent.
#
udev_trigger() {
	local UDEV_BIN=${UDEVADM}

	if [ -f ${UDEV_BIN} ]; then
		${UDEV_BIN} trigger
		${UDEV_BIN} settle
	else
		/sbin/udevtrigger
		/sbin/udevsettle
	fi
}
435
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
	local SRC_PATH=$1

	# When running in tree manually construct symlinks in tree to
	# the proper devices.  Symlinks are installed for all entries
	# in the config file regardless of if that device actually
	# exists.  When installed as a package udev can be relied on for
	# this and it will only create links for devices which exist.
	if [ ${INTREE} ]; then
		# Fix: save the starting directory in a local variable.
		# The original saved it in PWD, which 'cd' overwrites,
		# so the final 'cd ${PWD}' never returned to the
		# original directory.
		local SAVED_PWD=`pwd`
		mkdir -p ${DEVDIR}/
		cd ${DEVDIR}/
		# Config format: "<name> <by-path-id>"; create links for
		# the whole disk plus partitions 1 and 9.
		${AWK} '!/^#/ && /./ { system( \
			"ln -f -s /dev/disk/by-path/"$2" "$1";" \
			"ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
			"ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
			) }' $SRC_PATH
		cd ${SAVED_PWD}
	else
		DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
		DST_PATH=/etc/zfs/${DST_FILE}

		if [ -e ${DST_PATH} ]; then
			die "Error: Config ${DST_PATH} already exists"
		fi

		cp ${SRC_PATH} ${DST_PATH}
		udev_trigger
	fi

	return 0
}
478
# Remove the in-tree device symlinks created by udev_setup.  A no-op
# unless INTREE is set.
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		# Fix: save the starting directory in a local variable;
		# saving it in PWD (as before) was clobbered by 'cd', so
		# the final 'cd ${PWD}' was a no-op.
		local SAVED_PWD=`pwd`
		cd ${DEVDIR}/
		# Remove the whole-disk link and the p1/p9 partition links
		# for every non-comment, non-empty config entry.
		${AWK} '!/^#/ && /./ { system( \
			"rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${SAVED_PWD}
	fi

	return 0
}
492
# Convert a (channel, rank) pair into the udev disk name, mapping the
# channel number to a letter (1 -> a ... 26 -> z) and appending the
# rank, e.g. (1, 2) -> "a2".
udev_cr2d() {
	# Use the printf builtin for the hex conversion instead of
	# spawning bc; output is identical ("\x61" == 'a', etc.).
	local CHANNEL=$(printf "%x" $(($1 + 96)))
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
499
# Populate the global RAID0S array with one device path per disk,
# covering every channel of every rank (a flat stripe layout).
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID0S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAID0S[${IDX}]="${DEVDIR}/${DISK}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}
516
# Populate the global RAID10S array with two-way mirror specs, pairing
# each odd channel with the following even channel at every rank.
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID10S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL1 in $(seq 1 2 ${CHANNELS}); do
			CHANNEL2=$((CHANNEL1 + 1))
			DISK1=$(udev_cr2d ${CHANNEL1} ${RANK})
			DISK2=$(udev_cr2d ${CHANNEL2} ${RANK})
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S[${IDX}]="mirror ${GROUP}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}
536
# Populate the global RAIDZS array with one raidz vdev spec per rank.
# Each entry is the string "raidz <disk1> ... <diskN>" covering every
# channel at that rank.  Note entries are indexed from 1, not 0.
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for RANK in `seq 1 ${RANKS}`; do
		# Element 0 holds the vdev type keyword; disks fill 1..N.
		RAIDZ=("raidz")

		for CHANNEL in `seq 1 ${CHANNELS}`; do
			DISK=`udev_cr2d ${CHANNEL} ${RANK}`
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}
555
# Populate the global RAIDZ2S array with one raidz2 vdev spec per
# rank, mirroring udev_raidz_setup but with double parity.  Entries
# are indexed from 1, not 0.
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for RANK in `seq 1 ${RANKS}`; do
		# Element 0 holds the vdev type keyword; disks fill 1..N.
		RAIDZ2=("raidz2")

		for CHANNEL in `seq 1 ${CHANNELS}`; do
			DISK=`udev_cr2d ${CHANNEL} ${RANK}`
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}
574
# Print the fixed-width "NUM NAME " status header, then dispatch to
# the test_<NUM> function, which is expected to print Pass/Fail.
run_one_test() {
	local num=$1
	local name=$2

	printf "%-4d %-34s " "${num}" "${name}"
	test_${num}
}
582
# Print the same fixed-width status header as run_one_test, but mark
# the test as skipped instead of running it.
skip_one_test() {
	local num=$1
	local name=$2

	printf "%-4d %-34s " "${num}" "${name}"
	skip
}
590
# Run or skip test ${TEST_NUM} based on the TESTS_RUN / TESTS_SKIP
# selections: a test listed in TESTS_SKIP is always skipped; when
# TESTS_RUN is '*' everything else runs; otherwise only listed test
# numbers run and the rest are reported as skipped.
run_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	for i in ${TESTS_SKIP[@]}; do
		if [[ $i == ${TEST_NUM} ]] ; then
			skip_one_test ${TEST_NUM} "${TEST_NAME}"
			return 0
		fi
	done

	# TESTS_RUN is usually a plain string; [0] yields the whole
	# value in that case, so this matches the '*' default.
	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${TEST_NUM} "${TEST_NAME}"
	else
		for i in ${TESTS_RUN[@]}; do
			if [[ $i == ${TEST_NUM} ]] ; then
				run_one_test ${TEST_NUM} "${TEST_NAME}"
				return 0
			fi
		done

		skip_one_test ${TEST_NUM} "${TEST_NAME}"
	fi
}
615
# Kick udev, then poll once per second for the given device node to
# appear.  Returns 0 when the node exists, 1 after roughly ${2}
# seconds without it.
wait_udev() {
	local device=$1
	local delay=$2
	local count=0

	udev_trigger
	while [ ! -e ${device} ]; do
		if [ ${count} -gt ${delay} ]; then
			return 1
		fi

		count=$((count + 1))
		sleep 1
	done

	return 0
}