# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname "$0")"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

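# For example, a caller can restrict which numbered tests execute by
# exporting these before running a test script (script name below is
# illustrative):
#
#   TESTS_RUN="1 3" TESTS_SKIP="2" ./zfs-tests.sh
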
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/zpool}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}

ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}

LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

die() {
        echo -e "${PROG}: $1" >&2
        exit 1
}

msg() {
        if [ -n "${VERBOSE}" ]; then
                echo "$@"
        fi
}

pass() {
        echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}

fail() {
        echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
        exit 1
}

skip() {
        echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}

populate() {
        local ROOT=$1
        local MAX_DIR_SIZE=$2
        local MAX_FILE_SIZE=$3

        mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}

        for DIR in `find $ROOT -type d`; do
                COUNT=$(($RANDOM % $MAX_DIR_SIZE))

                for i in `seq $COUNT`; do
                        FILE=`mktemp -p ${DIR}`
                        SIZE=$(($RANDOM % $MAX_FILE_SIZE))
                        dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
                done
        done

        return 0
}

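# Example usage: create up to 10 files of up to 100 KiB each in every
# directory under a scratch tree (path illustrative):
#
#   populate /var/tmp/zfs.src 10 100
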
init() {
        # Disable the udev rule 90-zfs.rules to prevent the zfs module
        # stack from being loaded due to the detection of a zfs device.
        # This is important because the test scripts require full control
        # over when and how the modules are loaded/unloaded.  A trap is
        # set to ensure the udev rule is correctly replaced on exit.
        local RULE=${udevruledir}/90-zfs.rules
        if test -e ${RULE}; then
                trap "mv ${RULE}.disabled ${RULE}" INT TERM EXIT
                mv ${RULE} ${RULE}.disabled
        fi

        # Create a random directory tree of files and sub-directories
        # to act as a copy source for the various regression tests.
        # Note the trap must also restore the udev rule; setting a new
        # trap for the same signals would replace the one set above.
        SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`
        trap "rm -Rf $SRC_DIR; test -e ${RULE}.disabled && mv ${RULE}.disabled ${RULE}" INT TERM EXIT
        populate $SRC_DIR 10 100
}

# Dump the SPL debug log to a file for later inspection.
spl_dump_log() {
        ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
        local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
        ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
        echo "Dumped debug log: ${NAME}.log"

        return 0
}

check_modules() {
        local LOADED_MODULES=()
        local MISSING_MODULES=()

        for MOD in ${MODULES[*]}; do
                local NAME=`basename $MOD .ko`

                if ${LSMOD} | egrep -q "^${NAME}"; then
                        LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
                fi

                if ! ${INFOMOD} ${MOD} &>/dev/null; then
                        MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
                fi
        done

        if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
                ERROR="Unload these modules with '${PROG} -u':\n"
                ERROR="${ERROR}${LOADED_MODULES[*]}"
                return 1
        fi

        if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
                ERROR="The following modules cannot be found,"
                ERROR="${ERROR} ensure your source trees are built:\n"
                ERROR="${ERROR}${MISSING_MODULES[*]}"
                return 1
        fi

        return 0
}

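# Callers are expected to check the return value and report ${ERROR}
# themselves, for example:
#
#   check_modules || die "${ERROR}"
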
load_module() {
        local NAME=`basename $1 .ko`

        if [ -n "${VERBOSE}" ]; then
                echo "Loading ${NAME} ($@)"
        fi

        if ! ${LDMOD} $* &>/dev/null; then
                ERROR="Failed to load $1"
                return 1
        fi

        return 0
}

load_modules() {
        for MOD in ${KERNEL_MODULES[*]}; do
                load_module ${MOD}
        done

        for MOD in ${MODULES[*]}; do
                local NAME=`basename ${MOD} .ko`
                local VALUE=

                # Look for a module option of the form <module>=<options>.
                for OPT in "$@"; do
                        OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

                        if [ ${NAME} = "${OPT_NAME}" ]; then
                                VALUE=`echo ${OPT} | cut -f2- -d'='`
                        fi
                done

                load_module ${MOD} ${VALUE} || return 1
        done

        if [ -n "${VERBOSE}" ]; then
                echo "Successfully loaded ZFS module stack"
        fi

        return 0
}

unload_module() {
        local NAME=`basename $1 .ko`

        if [ -n "${VERBOSE}" ]; then
                echo "Unloading ${NAME} ($@)"
        fi

        if ! ${RMMOD} ${NAME}; then
                ERROR="Failed to unload ${NAME}"
                return 1
        fi

        return 0
}

unload_modules() {
        # Unload in the reverse of the load order.
        local MODULES_REVERSE=( $(echo ${MODULES[@]} |
                ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

        for MOD in ${MODULES_REVERSE[*]}; do
                local NAME=`basename ${MOD} .ko`
                local USE_COUNT=`${LSMOD} |
                        egrep "^${NAME} " | ${AWK} '{print $3}'`

                if [ "${USE_COUNT}" = 0 ] ; then
                        if [ -n "${DUMP_LOG}" -a "${NAME}" = "spl" ]; then
                                spl_dump_log
                        fi

                        unload_module ${MOD} || return 1
                fi
        done

        if [ -n "${VERBOSE}" ]; then
                echo "Successfully unloaded ZFS module stack"
        fi

        return 0
}

#
# Check that the loopback utilities are installed.
#
check_loop_utils() {
        test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}

#
# Find and return an unused loopback device.
#
unused_loop_device() {
        for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
                ${LOSETUP} ${DEVICE} &>/dev/null
                if [ $? -ne 0 ]; then
                        echo ${DEVICE}
                        return
                fi
        done

        die "Error: Unable to find unused loopback device"
}

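# Example usage, attaching a scratch file to the returned device
# (file name illustrative):
#
#   LODEVICE=`unused_loop_device`
#   ${LOSETUP} ${LODEVICE} /var/tmp/zpool-vdev0
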
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
        local TMP_FILE=`mktemp`

        ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
        ${AWK} -F":" -v losetup="${LOSETUP}" \
            '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
        ${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

        rm -f ${TMP_FILE}
        return 0
}

#
# Destroy the passed loopback devices; this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
        local LODEVICES="$1"

        msg "Destroying ${LODEVICES}"
        ${LOSETUP} -d ${LODEVICES} || \
                die "Error $? destroying ${LODEVICES} loopback"

        return 0
}

#
# Create a device label.
#
label() {
        local DEVICE=$1
        local LABEL=$2

        ${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 1

        return 0
}

#
# Create a primary partition on a block device.
#
partition() {
        local DEVICE=$1
        local TYPE=$2
        local START=$3
        local END=$4

        ${PARTED} --align optimal ${DEVICE} --script -- \
            mkpart ${TYPE} ${START} ${END} || return 1

        return 0
}

#
# Create a filesystem on the block device.
#
format() {
        local DEVICE=$1
        local FSTYPE=$2

        # Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
        # won't mount.
        /sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null || return 1

        return 0
}

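# Taken together these helpers prepare a scratch block device, for
# example (device name illustrative):
#
#   label /dev/sdx gpt
#   partition /dev/sdx primary 1MiB 100%
#   format /dev/sdx1 ext2
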
#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
        test -f ${MDADM} || die "${MDADM} utility must be installed"
        test -f ${PARTED} || die "${PARTED} utility must be installed"
}

check_md_partitionable() {
        local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
        local LODEVICE=`unused_loop_device`
        local MDDEVICE=`unused_md_device`
        local RESULT=1

        dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
            &>/dev/null || return ${RESULT}

        msg "Creating ${LODEVICE} using ${LOFILE}"
        ${LOSETUP} ${LODEVICE} ${LOFILE}
        if [ $? -ne 0 ]; then
                rm -f ${LOFILE}
                return ${RESULT}
        fi

        msg "Creating ${MDDEVICE} using ${LODEVICE}"
        ${MDADM} --build ${MDDEVICE} --level=faulty \
            --raid-devices=1 ${LODEVICE} &>/dev/null
        if [ $? -ne 0 ]; then
                destroy_loop_devices ${LODEVICE}
                rm -f ${LOFILE}
                return ${RESULT}
        fi
        wait_udev ${MDDEVICE} 30

        ${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
        RESULT=$?

        destroy_md_devices ${MDDEVICE}
        destroy_loop_devices ${LODEVICE}
        rm -f ${LOFILE}

        return ${RESULT}
}

#
# Find and return an unused md device.
#
unused_md_device() {
        for (( i=0; i<32; i++ )); do
                local MDDEVICE=md${i}

                # Skip active devices in /proc/mdstat.
                grep -q "${MDDEVICE} " /proc/mdstat && continue

                # Device doesn't exist, use it.
                if [ ! -e /dev/${MDDEVICE} ]; then
                        echo /dev/${MDDEVICE}
                        return
                fi

                # Device exists but may not be in use.
                if [ -b /dev/${MDDEVICE} ]; then
                        ${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
                        if [ $? -eq 1 ]; then
                                echo /dev/${MDDEVICE}
                                return
                        fi
                fi
        done

        die "Error: Unable to find unused md device"
}

#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
        destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
}

#
# Destroy the passed md devices; this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
        local MDDEVICES="$1"

        msg "Destroying ${MDDEVICES}"
        for MDDEVICE in ${MDDEVICES}; do
                ${MDADM} --stop ${MDDEVICE} &>/dev/null
                ${MDADM} --remove ${MDDEVICE} &>/dev/null
                ${MDADM} --detail ${MDDEVICE} &>/dev/null
        done

        return 0
}

#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
        ${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
        test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}

#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually.  This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
        local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

        if [ -f ${SCSIRESCAN} ]; then
                ${SCSIRESCAN} --forcerescan --remove &>/dev/null
        else
                local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
                local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
                echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
                echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
        fi
}

#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
        if [ -f ${UDEVADM} ]; then
                ${UDEVADM} trigger --action=change --subsystem-match=block
                ${UDEVADM} settle
        fi
}

#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, and each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#

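# For example, /dev/disk/zpool/a1 refers to the first disk (RANK 1) on
# the first controller (CHANNEL 'a'), and /dev/disk/zpool/c4 refers to
# the fourth disk on the third controller.
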
udev_setup() {
        local SRC_PATH=$1

        # When running in tree manually construct symlinks in tree to
        # the proper devices.  Symlinks are installed for all entries
        # in the config file regardless of whether that device actually
        # exists.  When installed as a package udev can be relied on for
        # this and it will only create links for devices which exist.
        if [ -n "${INTREE}" ]; then
                local SAVED_PWD=`pwd`
                mkdir -p ${DEVDIR}/
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
                    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
                    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
                    ) }' $SRC_PATH
                cd ${SAVED_PWD}
        else
                DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
                DST_PATH=/etc/zfs/${DST_FILE}

                if [ -e ${DST_PATH} ]; then
                        die "Error: Config ${DST_PATH} already exists"
                fi

                cp ${SRC_PATH} ${DST_PATH}
        fi

        return 0
}

udev_cleanup() {
        local SRC_PATH=$1

        if [ -n "${INTREE}" ]; then
                local SAVED_PWD=`pwd`
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
                cd ${SAVED_PWD}
        fi

        return 0
}

#
# Convert a <CHANNEL> <RANK> pair into the <CHANNEL><RANK> device name
# used by the udev disk mapping described above.
#
udev_cr2d() {
        local CHANNEL=`echo "obase=16; $1+96" | bc`
        local RANK=$2

        printf "\x${CHANNEL}${RANK}"
}

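# For example, `udev_cr2d 1 2` prints "a2": channel 1 maps to 1+96=97,
# which is 0x61, the hex escape for the letter 'a'.
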
udev_raid0_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID0S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAID0S[${IDX}]="${DEVDIR}/${DISK}"
                        let IDX=IDX+1
                done
        done

        return 0
}

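# For example, `udev_raid0_setup 2 2` populates RAID0S with:
#
#   ${DEVDIR}/a1 ${DEVDIR}/b1 ${DEVDIR}/a2 ${DEVDIR}/b2
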
udev_raid10_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID10S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
                        let CHANNEL2=CHANNEL1+1
                        DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
                        DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
                        GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
                        RAID10S[${IDX}]="mirror ${GROUP}"
                        let IDX=IDX+1
                done
        done

        return 0
}

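# For example, `udev_raid10_setup 1 4` populates RAID10S with a mirror
# pair for each pair of adjacent channels:
#
#   mirror ${DEVDIR}/a1 ${DEVDIR}/b1
#   mirror ${DEVDIR}/c1 ${DEVDIR}/d1
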
udev_raidz_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZS=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ=("raidz")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZS[${RANK}]="${RAIDZ[*]}"
        done

        return 0
}

udev_raidz2_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZ2S=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ2=("raidz2")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
        done

        return 0
}

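# For example, `udev_raidz2_setup 2 4` populates RAIDZ2S with one
# raidz2 group per rank:
#
#   raidz2 ${DEVDIR}/a1 ${DEVDIR}/b1 ${DEVDIR}/c1 ${DEVDIR}/d1
#   raidz2 ${DEVDIR}/a2 ${DEVDIR}/b2 ${DEVDIR}/c2 ${DEVDIR}/d2
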
run_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        test_${TEST_NUM}
}

skip_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        skip
}

run_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        for i in ${TESTS_SKIP[@]}; do
                if [[ $i == ${TEST_NUM} ]] ; then
                        skip_one_test ${TEST_NUM} "${TEST_NAME}"
                        return 0
                fi
        done

        if [ "${TESTS_RUN[0]}" = "*" ]; then
                run_one_test ${TEST_NUM} "${TEST_NAME}"
        else
                for i in ${TESTS_RUN[@]}; do
                        if [[ $i == ${TEST_NUM} ]] ; then
                                run_one_test ${TEST_NUM} "${TEST_NAME}"
                                return 0
                        fi
                done

                skip_one_test ${TEST_NUM} "${TEST_NAME}"
        fi
}

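# A test script defines numbered test_<N> functions and dispatches
# them through run_test, for example (test body illustrative):
#
#   test_1() {
#           # ... exercise some behavior ...
#           pass
#   }
#   run_test 1 "example test"
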
#
# Wait up to ${DELAY} seconds for the given device to appear.
#
wait_udev() {
        local DEVICE=$1
        local DELAY=$2
        local COUNT=0

        udev_trigger
        while [ ! -e ${DEVICE} ]; do
                if [ ${COUNT} -gt ${DELAY} ]; then
                        return 1
                fi

                let COUNT=${COUNT}+1
                sleep 1
        done

        return 0
}

stack_clear() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

        if [ -e $STACK_MAX_SIZE ]; then
                echo 1 >$STACK_TRACER_ENABLED
                echo 0 >$STACK_MAX_SIZE
        fi
}

stack_check() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
        local STACK_LIMIT=7000

        if [ -e $STACK_MAX_SIZE ]; then
                STACK_SIZE=`cat $STACK_MAX_SIZE`

                if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
                        echo
                        echo "Warning: max stack size $STACK_SIZE bytes"
                        cat $STACK_TRACE
                fi
        fi
}
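
# The stack helpers are intended to bracket a test run: stack_clear
# resets the kernel stack tracer before the tests and stack_check
# reports excessive stack usage afterwards, for example:
#
#   stack_clear
#   run_test 1 "example test"
#   stack_check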