X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=scripts%2Fzconfig.sh;h=281166c59ba756f3540b090ce148dc0f43d71c18;hb=refs%2Fheads%2Frertzinger%2Ffeature-zpool-get--p;hp=7a23d790e7cf6e4c8167b296a629ca3c843b58ad;hpb=0ee8118bd31d1c160123d0aac9c55455706d5975;p=zfs.git diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh index 7a23d79..281166c 100755 --- a/scripts/zconfig.sh +++ b/scripts/zconfig.sh @@ -16,7 +16,7 @@ PROG=zconfig.sh usage() { cat << EOF USAGE: -$0 [hvc] +$0 [hvcts] DESCRIPTION: ZFS/ZPOOL configuration tests @@ -25,6 +25,8 @@ OPTIONS: -h Show this message -v Verbose -c Cleanup lo+file devices at start + -t <#> Run listed tests + -s <#> Skip listed tests EOF } @@ -58,28 +60,26 @@ if [ $(id -u) != 0 ]; then die "Must run as root" fi +# Initialize the test suite +init + # Perform pre-cleanup is requested if [ ${CLEANUP} ]; then + ${ZFS_SH} -u cleanup_md_devices cleanup_loop_devices rm -f /tmp/zpool.cache.* fi -zconfig_partition() { - local DEVICE=$1 - local START=$2 - local END=$3 - local TMP_FILE=`mktemp` - - /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4 -${START},${END} -; -; -; -EOF - - rm ${TMP_FILE} -} +# Check if we need to skip the tests that require scsi_debug and lsscsi. +SCSI_DEBUG=0 +${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1 +HAVE_LSSCSI=0 +test -f ${LSSCSI} && HAVE_LSSCSI=1 +if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then + echo "Skipping test 10 which requires the scsi_debug " \ + "module and the ${LSSCSI} utility" +fi # Validate persistent zpool.cache configuration. test_1() { @@ -127,7 +127,7 @@ test_2() { rm -f ${TMP_CACHE} || fail 5 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6 ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7 - ${ZPOOL} import ${POOL_NAME} || fail 8 + ${ZPOOL} import -f ${POOL_NAME} || fail 8 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10 @@ -142,32 +142,32 @@ run_test 2 "scan disks for pools to import" zconfig_zvol_device_stat() { local EXPECT=$1 - local POOL_NAME=/dev/$2 - local ZVOL_NAME=/dev/$3 - local SNAP_NAME=/dev/$4 - local CLONE_NAME=/dev/$5 + local POOL_NAME=/dev/zvol/$2 + local ZVOL_NAME=/dev/zvol/$3 + local SNAP_NAME=/dev/zvol/$4 + local CLONE_NAME=/dev/zvol/$5 local COUNT=0 # Briefly delay for udev - sleep 3 + udev_trigger # Pool exists stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1 # Volume and partitions stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1 - stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1 - stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1 + stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 + stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 # Snapshot with partitions stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1 - stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1 - stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1 + stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 + stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 # Clone with partitions stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1 - stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1 - stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1 + stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 + stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 if [ $EXPECT -ne $COUNT ]; then return 1 @@ -192,7 +192,10 @@ test_3() { ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || 
fail 3 - zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4 + ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 + label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6 @@ -243,7 +246,10 @@ test_4() { ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3 - zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4 + ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 + label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6 @@ -258,8 +264,9 @@ test_4() { zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9 - # Load the modules, wait 1 second for udev + # Load the modules, list the pools to ensure they are opened ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10 + ${ZPOOL} list &>/dev/null # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ @@ -284,34 +291,30 @@ test_5() { local POOL_NAME=tank local ZVOL_NAME=fish local FULL_NAME=${POOL_NAME}/${ZVOL_NAME} - local SRC_DIR=/bin/ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 - ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 - ${ZFS} create -V 400M ${FULL_NAME} || fail 3 - - # Partition the volume, for a 400M volume there will be - # 812 cylinders, 16 heads, and 63 sectors per track. - zconfig_partition /dev/${FULL_NAME} 0 812 - - # Format the partition with ext3. - /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5 - - # Mount the ext3 filesystem and copy some data to it. - mkdir -p /tmp/${ZVOL_NAME}1 || fail 6 - mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7 - cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8 + ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2 + ${ZFS} create -V 800M ${FULL_NAME} || fail 3 + label /dev/zvol/${FULL_NAME} msdos || fail 4 + partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4 + format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5 + + # Mount the ext2 filesystem and copy some data to it. + mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6 + mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7 + cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8 sync # Verify the copied files match the original files. - diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9 + diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ + &>/dev/null || fail 9 # Remove the files, umount, destroy the volume and pool. 
- rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10 - umount /tmp/${ZVOL_NAME}1 || fail 11 - rmdir /tmp/${ZVOL_NAME}1 || fail 12 + rm -Rf /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} || fail 10 + umount /tmp/${ZVOL_NAME}-part1 || fail 11 + rmdir /tmp/${ZVOL_NAME}-part1 || fail 12 ${ZFS} destroy ${FULL_NAME} || fail 13 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14 @@ -320,7 +323,7 @@ test_5() { pass } -run_test 5 "zvol+ext3 volume" +run_test 5 "zvol+ext2 volume" # ZVOL snapshot sanity check test_6() { @@ -329,47 +332,47 @@ test_6() { local SNAP_NAME=pristine local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} - local SRC_DIR=/bin/ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 - ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 - ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3 - - # Partition the volume, for a 400M volume there will be - # 812 cylinders, 16 heads, and 63 sectors per track. - zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 - - # Format the partition with ext2 (no journal). - /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5 - - # Mount the ext3 filesystem and copy some data to it. - mkdir -p /tmp/${ZVOL_NAME}1 || fail 6 - mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7 + ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2 + ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3 + ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 + label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4 + format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5 + + # Mount the ext2 filesystem and copy some data to it. + mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6 + mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \ + || fail 7 # Snapshot the pristine ext2 filesystem and mount it read-only. ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8 - wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8 - mkdir -p /tmp/${SNAP_NAME}1 || fail 9 - mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10 + wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8 + mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9 + mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \ + &>/dev/null || fail 10 # Copy to original volume - cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11 + cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11 sync # Verify the copied files match the original files, # and the copied files do NOT appear in the snapshot. - diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12 - diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13 + diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ + &>/dev/null || fail 12 + diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \ + &>/dev/null && fail 13 # umount, destroy the snapshot, volume, and pool. 
- umount /tmp/${SNAP_NAME}1 || fail 14 - rmdir /tmp/${SNAP_NAME}1 || fail 15 + umount /tmp/${SNAP_NAME}-part1 || fail 14 + rmdir /tmp/${SNAP_NAME}-part1 || fail 15 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16 - umount /tmp/${ZVOL_NAME}1 || fail 17 - rmdir /tmp/${ZVOL_NAME}1 || fail 18 + umount /tmp/${ZVOL_NAME}-part1 || fail 17 + rmdir /tmp/${ZVOL_NAME}-part1 || fail 18 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20 @@ -389,69 +392,73 @@ test_7() { local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME} - local SRC_DIR=/bin/ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3 + ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 + label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4 + format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5 - # Partition the volume, for a 300M volume there will be - # 609 cylinders, 16 heads, and 63 sectors per track. - zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 - - # Format the partition with ext2 (no journal). - /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5 - - # Mount the ext3 filesystem and copy some data to it. - mkdir -p /tmp/${ZVOL_NAME}1 || fail 6 - mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7 + # Mount the ext2 filesystem and copy some data to it. + mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6 + mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \ + || fail 7 # Snapshot the pristine ext2 filesystem and mount it read-only. ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8 - wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8 - mkdir -p /tmp/${SNAP_NAME}1 || fail 9 - mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10 + wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8 + mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9 + mount /dev/zvol/${FULL_SNAP_NAME}-part1 \ + /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10 # Copy to original volume. - cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11 + cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11 sync # Verify the copied files match the original files, # and the copied files do NOT appear in the snapshot. - diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12 - diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13 + diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ + &>/dev/null || fail 12 + diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \ + &>/dev/null && fail 13 # Clone from the original pristine snapshot ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14 - wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14 - mkdir -p /tmp/${CLONE_NAME}1 || fail 15 - mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16 + wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14 + mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15 + mount /dev/zvol/${FULL_CLONE_NAME}-part1 \ + /tmp/${CLONE_NAME}-part1 || fail 16 # Verify the clone matches the pristine snapshot, # and the files copied to the original volume are NOT there. 
- diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17 - diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18 + diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ + &>/dev/null || fail 17 + diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ + &>/dev/null && fail 18 # Copy to cloned volume. - cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19 + cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19 sync # Verify the clone matches the modified original volume. - diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20 + diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ + &>/dev/null || fail 20 # umount, destroy the snapshot, volume, and pool. - umount /tmp/${CLONE_NAME}1 || fail 21 - rmdir /tmp/${CLONE_NAME}1 || fail 22 + umount /tmp/${CLONE_NAME}-part1 || fail 21 + rmdir /tmp/${CLONE_NAME}-part1 || fail 22 ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23 - umount /tmp/${SNAP_NAME}1 || fail 24 - rmdir /tmp/${SNAP_NAME}1 || fail 25 + umount /tmp/${SNAP_NAME}-part1 || fail 24 + rmdir /tmp/${SNAP_NAME}-part1 || fail 25 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26 - umount /tmp/${ZVOL_NAME}1 || fail 27 - rmdir /tmp/${ZVOL_NAME}1 || fail 28 + umount /tmp/${ZVOL_NAME}-part1 || fail 27 + rmdir /tmp/${ZVOL_NAME}-part1 || fail 28 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30 @@ -472,50 +479,48 @@ test_8() { local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME} local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME} local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME} - local SRC_DIR=/bin/ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create two pools and a volume ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2 - ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3 - ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4 - - # Partition the volume, for a 300M volume there will be - # 609 cylinders, 16 heads, and 63 sectors per track. - zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609 - - # Format the partition with ext2. - /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5 - - # Mount the ext3 filesystem and copy some data to it. - mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6 - mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7 - cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8 + ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2 + ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3 + ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME1} || fail 3 + label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4 + partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4 + format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5 + + # Mount the ext2 filesystem and copy some data to it. + mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6 + mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \ + /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7 + cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8 sync || fail 9 - # Snapshot the ext3 filesystem so it may be sent. + # Snapshot the ext2 filesystem so it may be sent. 
${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11 - wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11 + wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11 # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2 (${ZFS} send ${FULL_SNAP_NAME1} | \ ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12 - wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12 + wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12 - # Mount the sent ext3 filesystem. - mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13 - mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14 + # Mount the sent ext2 filesystem. + mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13 + mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \ + /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14 # Verify the contents of the volumes match - diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \ + diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \ &>/dev/null || fail 15 # Umount, destroy the volume and pool. - umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16 - umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17 - rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18 - rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19 + umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16 + umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17 + rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18 + rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19 rmdir /tmp/${POOL_NAME1} || fail 20 rmdir /tmp/${POOL_NAME2} || fail 21 @@ -566,5 +571,97 @@ test_9() { } run_test 9 "zpool events" -exit 0 +zconfig_add_vdev() { + local POOL_NAME=$1 + local TYPE=$2 + local DEVICE=$3 + local TMP_FILE1=`mktemp` + local TMP_FILE2=`mktemp` + local TMP_FILE3=`mktemp` + + BASE_DEVICE=`basename ${DEVICE}` + + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} + ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1 + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} + diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3} + [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1 + + PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'` + case $TYPE in + cache) + [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1 + ;; + log) + [ "${PARENT_VDEV}" = "logs" ] || return 1 + ;; + esac + + if ! tail -1 ${TMP_FILE3} | + egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then + return 1 + fi + rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3} + + return 0 +} + +# zpool add and remove sanity check +test_10() { + local POOL_NAME=tank + local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` + local TMP_FILE1=`mktemp` + local TMP_FILE2=`mktemp` + + if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then + skip + return + fi + + test `${LSMOD} | grep -c scsi_debug` -gt 0 && \ + (${RMMOD} scsi_debug || exit 1) + + /sbin/modprobe scsi_debug dev_size_mb=128 || + die "Error $? 
creating scsi_debug device" + udev_trigger + + SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'` + BASE_SDDEVICE=`basename $SDDEVICE` + + # Create a pool + ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 + ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3 + + # Add and remove a cache vdev by full path + zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4 + ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5 + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6 + cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7 + sleep 1 + + # Add and remove a cache vdev by shorthand path + zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8 + ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9 + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10 + cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11 + sleep 1 + + # Add and remove a log vdev + zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12 + ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13 + ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14 + cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15 + + ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16 + ${ZFS_SH} -u || fail 17 + ${RMMOD} scsi_debug || fail 18 + + rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19 + + pass +} +run_test 10 "zpool add/remove vdev" + +exit 0
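
With this change applied, individual tests can be selected or skipped through the new -t and -s options advertised in the usage text above. A minimal invocation sketch (the script must run as root, and test numbers follow the `-t <#>` / `-s <#>` form shown in the usage output):

    ./scripts/zconfig.sh -c -t 10    # run only test 10 (zpool add/remove vdev)
    ./scripts/zconfig.sh -c -s 10    # run the full suite but skip test 10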