usage() {
cat << EOF
USAGE:
-$0 [hvc]
+$0 [hvcts]
DESCRIPTION:
ZFS/ZPOOL configuration tests
-h Show this message
-v Verbose
-c Cleanup lo+file devices at start
+ -t <#> Run listed tests
+ -s <#> Skip listed tests
EOF
}
-while getopts 'hvc?' OPTION; do
+while getopts 'hvct:s:?' OPTION; do
case $OPTION in
h)
usage
c)
CLEANUP=1
;;
+ t)
+ TESTS_RUN=($OPTARG)
+ ;;
+ s)
+ TESTS_SKIP=($OPTARG)
+ ;;
?)
usage
exit
# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
+ ${ZFS_SH} -u
+ cleanup_md_devices
cleanup_loop_devices
rm -f /tmp/zpool.cache.*
fi
+# Detect whether the scsi_debug module and the lsscsi utility are available;
+# test 10 requires both and will skip itself if either is missing.
+SCSI_DEBUG=0
+${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
+HAVE_LSSCSI=0
+test -f ${LSSCSI} && HAVE_LSSCSI=1
+if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
+ echo "Skipping test 10 which requires the scsi_debug " \
+ "module and the ${LSSCSI} utility"
+fi
+
zconfig_partition() {
local DEVICE=$1
local START=$2
}
# Validate persistent zpool.cache configuration.
-zconfig_test1() {
+test_1() {
local POOL_NAME=test1
local TMP_FILE1=`mktemp`
local TMP_FILE2=`mktemp`
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 1 - persistent zpool.cache: "
-
# Create a pool save its status for comparison.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
pass
}
-zconfig_test1
+run_test 1 "persistent zpool.cache"
# Validate ZFS disk scanning and import w/out zpool.cache configuration.
-zconfig_test2() {
+test_2() {
local POOL_NAME=test2
local TMP_FILE1=`mktemp`
local TMP_FILE2=`mktemp`
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 2 - scan disks for pools to import: "
-
# Create a pool save its status for comparison.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
pass
}
-zconfig_test2
+run_test 2 "scan disks for pools to import"
zconfig_zvol_device_stat() {
local EXPECT=$1
local COUNT=0
# Briefly delay for udev
- sleep 1
+ sleep 3
# Pool exists
stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
-zconfig_test3() {
+test_3() {
local POOL_NAME=tank
local ZVOL_NAME=volume
local SNAP_NAME=snap
local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 3 - zpool import/export device: "
-
# Create a pool, volume, partition, snapshot, and clone.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
pass
}
-zconfig_test3
+run_test 3 "zpool import/export device"
# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
-zconfig_test4() {
+test_4() {
POOL_NAME=tank
ZVOL_NAME=volume
SNAP_NAME=snap
FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 4 - zpool insmod/rmmod device: "
-
# Create a pool, volume, snapshot, and clone
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
pass
}
-zconfig_test4
+run_test 4 "zpool insmod/rmmod device"
# ZVOL volume sanity check
-zconfig_test5() {
+test_5() {
local POOL_NAME=tank
local ZVOL_NAME=fish
local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
local SRC_DIR=/bin/
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 5 - zvol+ext3 volume: "
-
# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
pass
}
-zconfig_test5
+run_test 5 "zvol+ext3 volume"
# ZVOL snapshot sanity check
-zconfig_test6() {
+test_6() {
local POOL_NAME=tank
local ZVOL_NAME=fish
local SNAP_NAME=pristine
local SRC_DIR=/bin/
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 6 - zvol+ext2 snapshot: "
-
# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
# Snapshot the pristine ext2 filesystem and mount it read-only.
- ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
+ ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
+ wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
mkdir -p /tmp/${SNAP_NAME}1 || fail 9
mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
pass
}
-zconfig_test6
+run_test 6 "zvol+ext2 snapshot"
# ZVOL clone sanity check
-zconfig_test7() {
+test_7() {
local POOL_NAME=tank
local ZVOL_NAME=fish
local SNAP_NAME=pristine
local SRC_DIR=/bin/
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
- echo -n "test 7 - zvol+ext2 clone: "
-
# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
- ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
+ ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
- # Partition the volume, for a 400M volume there will be
- # 812 cylinders, 16 heads, and 63 sectors per track.
- zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812
+ # Partition the volume, for a 300M volume there will be
+ # 609 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609
# Format the partition with ext2 (no journal).
/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
# Snapshot the pristine ext2 filesystem and mount it read-only.
- ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
+ ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
+ wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
mkdir -p /tmp/${SNAP_NAME}1 || fail 9
mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
# Clone from the original pristine snapshot
- ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} && sleep 1 || fail 14
+ ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
+ wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
mkdir -p /tmp/${CLONE_NAME}1 || fail 15
mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16
pass
}
-zconfig_test7
+run_test 7 "zvol+ext2 clone"
# Send/Receive sanity check
test_8() {
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
- ${ZFS} create -V 400M ${FULL_ZVOL_NAME1} || fail 4
+ ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4
- # Partition the volume, for a 400M volume there will be
- # 812 cylinders, 16 heads, and 63 sectors per track.
- zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 812
+ # Partition the volume, for a 300M volume there will be
+ # 609 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609
# Format the partition with ext2.
/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5
sync || fail 9
# Snapshot the ext3 filesystem so it may be sent.
- ${ZFS} snapshot ${FULL_SNAP_NAME1} && sleep 1 || fail 11
+ ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
+ wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11
# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
(${ZFS} send ${FULL_SNAP_NAME1} | \
- ${ZFS} receive ${FULL_ZVOL_NAME2}) && sleep 1 || fail 12
+ ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
+ wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12
# Mount the sent ext3 filesystem.
mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
- ${ZFS} create -V 400M ${FULL_NAME} || fail 3
+ ${ZFS} create -V 300M ${FULL_NAME} || fail 3
# Dump the events, there should be at least 5 lines.
${ZPOOL} events >${TMP_EVENTS} || fail 4
}
run_test 9 "zpool events"
+zconfig_add_vdev() {
+ local POOL_NAME=$1
+ local TYPE=$2
+ local DEVICE=$3
+ local TMP_FILE1=`mktemp`
+ local TMP_FILE2=`mktemp`
+ local TMP_FILE3=`mktemp`
+
+ BASE_DEVICE=`basename ${DEVICE}`
+
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
+ ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
+ diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}
+
+ [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1
+
+ PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
+ case $TYPE in
+ cache)
+ [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
+ ;;
+ log)
+ [ "${PARENT_VDEV}" = "logs" ] || return 1
+ ;;
+ esac
+
+ if ! tail -1 ${TMP_FILE3} |
+ egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
+ return 1
+ fi
+ rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}
+
+ return 0
+}
+
+# zpool add and remove sanity check
+test_10() {
+ local POOL_NAME=tank
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+ local TMP_FILE1=`mktemp`
+ local TMP_FILE2=`mktemp`
+
+ if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
+ skip
+ return
+ fi
+
+ test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
+ (${RMMOD} scsi_debug || exit 1)
+
+ /sbin/modprobe scsi_debug dev_size_mb=128 ||
+ die "Error $? creating scsi_debug device"
+ udev_trigger
+
+ SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
+ BASE_SDDEVICE=`basename $SDDEVICE`
+
+ # Create a pool
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
+
+ # Add and remove a cache vdev by full path
+ zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
+ ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
+ cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
+
+ # Add and remove a cache vdev by shorthand path
+ zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
+ ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
+ cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
+
+ # Add and remove a log vdev
+ zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
+ ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
+ cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15
+
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
+ ${ZFS_SH} -u || fail 17
+ ${RMMOD} scsi_debug || fail 18
+
+ rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19
+
+ pass
+}
+run_test 10 "zpool add/remove vdev"
+
exit 0