Fix test script error codes
[zfs.git] / scripts / zconfig.sh
#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
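
# NOTE: common.sh is expected to provide the helper functions used below
# (init, run_test, pass, fail, skip, die, udev_trigger, wait_udev, label,
# partition, format, cleanup_md_devices, cleanup_loop_devices) as well as
# the tool variables (${ZFS}, ${ZPOOL}, ${ZFS_SH}, ${ZPOOL_CREATE_SH},
# ${LSSCSI}, ${INFOMOD}, ${LSMOD}, ${RMMOD}, ${AWK}).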

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <#>] [-s <#>]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}
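
# Example (hypothetical invocation): clean up stale loopback devices, enable
# verbose output, and run only tests 1, 2, and 3:
#
#       sudo ./zconfig.sh -c -v -t "1 2 3"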

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done
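
# The test numbers collected in TESTS_RUN/TESTS_SKIP are presumably consulted
# by run_test (from common.sh) to decide which of the numbered tests below
# are executed or skipped.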

if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Initialize the test suite
init

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        ${ZFS_SH} -u
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip test 10, which requires the scsi_debug
# module and the lsscsi utility.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
        echo "Skipping test 10 which requires the scsi_debug" \
                "module and the ${LSSCSI} utility"
fi
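
# Each test below follows the same basic pattern: load the module stack with
# a private spa_config_path, build a loopback-backed pool with
# ${ZPOOL_CREATE_SH}, exercise one feature, then destroy the pool and unload
# the modules.  The numeric argument passed to fail identifies which step of
# the test failed.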

# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, reload the module
        # stack, and probe the disks to import the pool.  As a cross-check,
        # verify the old pool status against the imported pool's status.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
        ${ZPOOL} import -f ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/zvol/$2
        local ZVOL_NAME=/dev/zvol/$3
        local SNAP_NAME=/dev/zvol/$4
        local CLONE_NAME=/dev/zvol/$5
        local COUNT=0

        # Briefly delay for udev
        udev_trigger

        # Pool exists
        stat ${POOL_NAME} &>/dev/null   && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME}  &>/dev/null  && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part1 &>/dev/null  && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part2 &>/dev/null  && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME}  &>/dev/null  && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part1 &>/dev/null  && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part2 &>/dev/null  && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME}  &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}
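
# Example (mirrors the call in test_3 below): expect all ten device nodes for
# pool "tank" with its volume, snapshot, and clone:
#
#       zconfig_zvol_device_stat 10 tank tank/volume \
#           tank/volume@snap tank/clone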

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partitions, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 50% 100% || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool; the device check below triggers udev
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 3 "zpool import/export device"
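
# Test 4 repeats the device checks from test 3, but across a module
# unload/reload cycle instead of a pool export/import cycle.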

# zpool insmod/rmmod device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_4() {
        POOL_NAME=tank
        ZVOL_NAME=volume
        SNAP_NAME=snap
        CLONE_NAME=clone
        FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 50% 100% || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules; the device check below triggers udev
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"
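
# Tests 5 through 8 share a zvol+ext2 pattern: create a volume, label and
# partition it, format the first partition as ext2, mount it under /tmp,
# copy ${SRC_DIR} onto it, and use diff to verify the data.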

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_NAME} || fail 3
        label /dev/zvol/${FULL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
                &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}-part1${SRC_DIR}* || fail 10
        umount /tmp/${ZVOL_NAME}-part1 || fail 11
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext2 volume"

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
                || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
                &>/dev/null || fail 10

        # Copy to original volume
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
                &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
                &>/dev/null && fail 13

        # Umount, destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}-part1 || fail 14
        rmdir /tmp/${SNAP_NAME}-part1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}-part1 || fail 17
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
                || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
                /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

        # Copy to original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
                &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
                &>/dev/null && fail 13

        # Clone from the original pristine snapshot
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
        mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
                /tmp/${CLONE_NAME}-part1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
                &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
                &>/dev/null && fail 18

        # Copy to cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
                &>/dev/null || fail 20

        # Umount, destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}-part1 || fail 21
        rmdir /tmp/${CLONE_NAME}-part1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}-part1 || fail 24
        rmdir /tmp/${SNAP_NAME}-part1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}-part1 || fail 27
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
                /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8
        sync || fail 9

        # Snapshot the ext2 filesystem so it may be sent.
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
        wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
        (${ZFS} send ${FULL_SNAP_NAME1} | \
        ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

        # Mount the received ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
        mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
                /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

        # Verify the contents of the volumes match
        diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
            &>/dev/null || fail 15

        # Umount and remove the mount points, then destroy the volumes
        # and pools.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"
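
# Test 9 below only sanity checks the line counts reported by 'zpool events';
# when debugging interactively, 'zpool events -v' (if supported by the build)
# prints the full event payloads.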

# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3

        # Dump the events; there should be at least 5 lines.
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"

zconfig_add_vdev() {
        local POOL_NAME=$1
        local TYPE=$2
        local DEVICE=$3
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_FILE3=`mktemp`

        BASE_DEVICE=`basename ${DEVICE}`

        # Capture 'zpool status' before and after the add; the diff should be
        # exactly three lines: the change command, the new parent vdev line,
        # and the new device line.
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
        ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
        diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

        [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1

        # The second-to-last diff line names the parent vdev (cache or logs).
        PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
        case $TYPE in
        cache)
                [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
                ;;
        log)
                [ "${PARENT_VDEV}" = "logs" ] || return 1
                ;;
        esac

        # The last diff line must show the new device ONLINE.
        if ! tail -1 ${TMP_FILE3} |
            egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
                return 1
        fi
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

        return 0
}
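
# Example (mirrors the calls in test_10 below, where ${SDDEVICE} is the
# scsi_debug disk discovered via lsscsi):
#
#       zconfig_add_vdev tank cache ${SDDEVICE} || fail 4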

# zpool add and remove sanity check
test_10() {
        local POOL_NAME=tank
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`

        if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
                skip
                return
        fi

        # Remove any stale scsi_debug module, then load it with a small disk.
        if [ `${LSMOD} | grep -c scsi_debug` -gt 0 ]; then
                ${RMMOD} scsi_debug || die "Error removing scsi_debug module"
        fi

        /sbin/modprobe scsi_debug dev_size_mb=128 ||
                die "Error $? creating scsi_debug device"
        udev_trigger

        SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
        BASE_SDDEVICE=`basename $SDDEVICE`

        # Create a pool
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Add and remove a cache vdev by full path
        zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
        ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
        sleep 1

        # Add and remove a cache vdev by shorthand path
        zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
        sleep 1

        # Add and remove a log vdev
        zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
        ${ZFS_SH} -u || fail 17
        ${RMMOD} scsi_debug || fail 18

        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

        pass
}
run_test 10 "zpool add/remove vdev"
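
# Reaching this point implies every selected test passed, since fail (from
# common.sh) is expected to exit the script with the failing step's code.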

exit 0