/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled, ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * (7) Threads are created with a reduced stack size, for sanity checking.
 *     Therefore, it's important not to allocate huge buffers on the stack.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this file
 * must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <stdio_ext.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
        uint64_t zh_hdr_size;
        uint64_t zh_opts_size;
        uint64_t zh_stats_size;
        uint64_t zh_stats_count;
        uint64_t zh_ds_count;
} ztest_shared_hdr_t;
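
/*
 * Sketch of the shared-file layout implied by the header fields (per the
 * comment at the top of this file, the header always sits at offset 0):
 *
 *      offset 0:       ztest_shared_hdr_t (zh_hdr_size bytes)
 *      following it:   the option block (zh_opts_size bytes)
 *      after that:     zh_stats_count call-state records of zh_stats_size
 *                      bytes each, then zh_ds_count per-dataset records
 */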

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
        char zo_pool[MAXNAMELEN];
        char zo_dir[MAXNAMELEN];
        char zo_alt_ztest[MAXNAMELEN];
        char zo_alt_libpath[MAXNAMELEN];
        uint64_t zo_vdevtime;
        uint64_t zo_passtime;
        uint64_t zo_killrate;
        uint64_t zo_maxloops;
        uint64_t zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
        .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
        .zo_dir = { '/', 't', 'm', 'p', '\0' },
        .zo_alt_ztest = { '\0' },
        .zo_alt_libpath = { '\0' },
        .zo_ashift = SPA_MINBLOCKSHIFT,
        .zo_raidz_parity = 1,
        .zo_vdev_size = SPA_MINDEVSIZE,
        .zo_passtime = 60,              /* 60 seconds */
        .zo_killrate = 70,              /* 70% kill rate */
        .zo_time = 300,                 /* 5 minutes */
        .zo_maxloops = 50,              /* max loops during spa_freeze() */
        .zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
        uint64_t zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define BT_MAGIC        0x123456789abcdefULL
#define MAXFAULTS() \
        (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
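
/*
 * Worked example: with 2-way mirrors of raidz1 (-m 2 -R 1), losing a block
 * requires (parity + 1) = 2 leaf failures in each of the 2 mirror legs,
 * i.e. 2 * (1 + 1) = 4 faults in all, so MAXFAULTS() evaluates to 3.
 */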

enum ztest_io_type {
        ZTEST_IO_WRITE_TAG,
        ZTEST_IO_WRITE_PATTERN,
        ZTEST_IO_WRITE_ZEROES,
        ZTEST_IO_TRUNCATE,
        ZTEST_IO_SETATTR,
        ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
        uint64_t bt_magic;
        uint64_t bt_objset;
        uint64_t bt_object;
        uint64_t bt_offset;
        uint64_t bt_gen;
        uint64_t bt_txg;
        uint64_t bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
        uint64_t bw_index;
        uint64_t bw_txg;
        uint64_t bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */
typedef enum {
        RL_READER,
        RL_WRITER,
        RL_APPEND
} rl_type_t;

typedef struct rll {
        void *rll_writer;
        int rll_readers;
        kmutex_t rll_lock;
        kcondvar_t rll_cv;
} rll_t;

typedef struct rl {
        uint64_t rl_object;
        uint64_t rl_offset;
        uint64_t rl_size;
        rll_t *rl_lock;
} rl_t;

#define ZTEST_RANGE_LOCKS       64
#define ZTEST_OBJECT_LOCKS      64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
        uint64_t od_dir;
        uint64_t od_object;
        dmu_object_type_t od_type;
        dmu_object_type_t od_crtype;
        uint64_t od_blocksize;
        uint64_t od_crblocksize;
        uint64_t od_gen;
        uint64_t od_crgen;
        char od_name[MAXNAMELEN];
} ztest_od_t;

typedef struct ztest_ds {
        ztest_shared_ds_t *zd_shared;
        objset_t *zd_os;
        krwlock_t zd_zilog_lock;
        zilog_t *zd_zilog;
        ztest_od_t *zd_od;              /* debugging aid */
        char zd_name[MAXNAMELEN];
        kmutex_t zd_dirobj_lock;
        rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
        rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
        ztest_func_t *zi_func;          /* test function */
        uint64_t zi_iters;              /* iterations per execution */
        uint64_t *zi_interval;          /* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
        uint64_t zc_count;              /* per-pass count */
        uint64_t zc_time;               /* per-pass time */
        uint64_t zc_next;               /* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
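
/*
 * Rough scheduling model (a sketch; the dispatch loop lives in the test
 * runner, not shown here): a test function is eligible to run once the
 * current time passes zc_next, after which zc_next is pushed forward by
 * roughly *zi_interval nanoseconds.  A zopt_rarely entry therefore fires
 * about once a minute, while a zopt_always entry runs on every pass.
 */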

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;

uint64_t zopt_always = 0ULL * NANOSEC;          /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;  /* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;           /* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;      /* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;         /* every 60 seconds */

ztest_info_t ztest_info[] = {
        { ztest_dmu_read_write,                 1,      &zopt_always    },
        { ztest_dmu_write_parallel,             10,     &zopt_always    },
        { ztest_dmu_object_alloc_free,          1,      &zopt_always    },
        { ztest_dmu_commit_callbacks,           1,      &zopt_always    },
        { ztest_zap,                            30,     &zopt_always    },
        { ztest_zap_parallel,                   100,    &zopt_always    },
        { ztest_split_pool,                     1,      &zopt_always    },
        { ztest_zil_commit,                     1,      &zopt_incessant },
        { ztest_zil_remount,                    1,      &zopt_sometimes },
        { ztest_dmu_read_write_zcopy,           1,      &zopt_often     },
        { ztest_dmu_objset_create_destroy,      1,      &zopt_often     },
        { ztest_dsl_prop_get_set,               1,      &zopt_often     },
        { ztest_spa_prop_get_set,               1,      &zopt_sometimes },
        { ztest_dmu_prealloc,                   1,      &zopt_sometimes },
        { ztest_fzap,                           1,      &zopt_sometimes },
        { ztest_dmu_snapshot_create_destroy,    1,      &zopt_sometimes },
        { ztest_spa_create_destroy,             1,      &zopt_sometimes },
        { ztest_fault_inject,                   1,      &zopt_sometimes },
        { ztest_ddt_repair,                     1,      &zopt_sometimes },
        { ztest_dmu_snapshot_hold,              1,      &zopt_sometimes },
        { ztest_reguid,                         1,      &zopt_sometimes },
        { ztest_spa_rename,                     1,      &zopt_rarely    },
        { ztest_scrub,                          1,      &zopt_rarely    },
        { ztest_dsl_dataset_promote_busy,       1,      &zopt_rarely    },
        { ztest_vdev_attach_detach,             1,      &zopt_rarely    },
        { ztest_vdev_LUN_growth,                1,      &zopt_rarely    },
        { ztest_vdev_add_remove,                1,      &ztest_opts.zo_vdevtime },
        { ztest_vdev_aux_add_remove,            1,      &ztest_opts.zo_vdevtime },
};

#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
        kmutex_t zcl_callbacks_lock;
        list_t zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
        boolean_t zs_do_init;
        hrtime_t zs_proc_start;
        hrtime_t zs_proc_stop;
        hrtime_t zs_thread_start;
        hrtime_t zs_thread_stop;
        hrtime_t zs_thread_kill;
        uint64_t zs_enospc_count;
        uint64_t zs_vdev_next_leaf;
        uint64_t zs_vdev_aux;
        uint64_t zs_alloc;
        uint64_t zs_space;
        uint64_t zs_splits;
        uint64_t zs_mirrors;
        uint64_t zs_metaslab_sz;
        uint64_t zs_metaslab_df_alloc_threshold;
} ztest_shared_t;

#define ID_PARALLEL     -1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static kmutex_t ztest_vdev_lock;

/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests.  To modify the namespace, consumers must grab
 * this lock as writer.  Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static krwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;

/*
 * Minimum number of commit callbacks that need to be registered for us to
 * check whether the minimum txg delay is acceptable.
 */
#define ZTEST_COMMIT_CB_MIN_REG 100

/*
 * If a number of txgs equal to this threshold have been created after a commit
 * callback has been registered but not called, then we assume there is an
 * implementation bug.
 */
#define ZTEST_COMMIT_CB_THRESH  (TXG_CONCURRENT_STATES + 1000)
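
/*
 * Example: since at most TXG_CONCURRENT_STATES txgs are ever in flight, a
 * callback registered in txg N would normally fire within a handful of
 * txgs; watching ZTEST_COMMIT_CB_THRESH txgs come and go without the
 * callback firing can only mean the callback machinery lost it.
 */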

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

enum ztest_object {
        ZTEST_META_DNODE = 0,
        ZTEST_DIROBJ,
        ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
        return ("default,verbose");     /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
        return ("fail,contents");       /* $UMEM_LOGGING setting */
}

#define FATAL_MSG_SZ    1024

char *fatal_msg;

static void
fatal(int do_perror, char *message, ...)
{
        va_list args;
        int save_errno = errno;
        char *buf;

        (void) fflush(stdout);
        buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);

        va_start(args, message);
        (void) sprintf(buf, "ztest: ");
        /* LINTED */
        (void) vsprintf(buf + strlen(buf), message, args);
        va_end(args);

        if (do_perror) {
                (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
                    ": %s", strerror(save_errno));
        }
        (void) fprintf(stderr, "%s\n", buf);
        fatal_msg = buf;                /* to ease debugging */
        if (ztest_dump_core)
                abort();
        exit(3);
}

static uint64_t
str2shift(const char *buf)
{
        const char *ends = "BKMGTPEZ";
        int i;

        for (i = 0; i < strlen(ends); i++) {
                if (toupper(buf[0]) == ends[i])
                        break;
        }
        if (i == strlen(ends)) {
                (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
                    buf);
                usage(B_FALSE);
        }
        if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
                return (10 * i);
        }
        (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
        usage(B_FALSE);
        /* NOTREACHED */
}

static uint64_t
nicenumtoull(const char *buf)
{
        char *end;
        uint64_t val;

        val = strtoull(buf, &end, 0);
        if (end == buf) {
                (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
                usage(B_FALSE);
        } else if (end[0] == '.') {
                double fval = strtod(buf, &end);
                fval *= pow(2, str2shift(end));
                if (fval > UINT64_MAX) {
                        (void) fprintf(stderr, "ztest: value too large: %s\n",
                            buf);
                        usage(B_FALSE);
                }
                val = (uint64_t)fval;
        } else {
                int shift = str2shift(end);
                if (shift >= 64 || (val << shift) >> shift != val) {
                        (void) fprintf(stderr, "ztest: value too large: %s\n",
                            buf);
                        usage(B_FALSE);
                }
                val <<= shift;
        }
        return (val);
}
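
/*
 * Examples (assuming the usual power-of-two suffix shift of 10 * i from
 * str2shift() above): "300" -> 300, "128K" -> 131072, and the fractional
 * form "1.5G" -> 1610612736.
 */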

static void
usage(boolean_t requested)
{
        const ztest_shared_opts_t *zo = &ztest_opts_defaults;

        char nice_vdev_size[10];
        char nice_gang_bang[10];
        FILE *fp = requested ? stdout : stderr;

        nicenum(zo->zo_vdev_size, nice_vdev_size);
        nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);

        (void) fprintf(fp, "Usage: %s\n"
            "\t[-v vdevs (default: %llu)]\n"
            "\t[-s size_of_each_vdev (default: %s)]\n"
            "\t[-a alignment_shift (default: %d)] use 0 for random\n"
            "\t[-m mirror_copies (default: %d)]\n"
            "\t[-r raidz_disks (default: %d)]\n"
            "\t[-R raidz_parity (default: %d)]\n"
            "\t[-d datasets (default: %d)]\n"
            "\t[-t threads (default: %d)]\n"
            "\t[-g gang_block_threshold (default: %s)]\n"
            "\t[-i init_count (default: %d)] initialize pool i times\n"
            "\t[-k kill_percentage (default: %llu%%)]\n"
            "\t[-p pool_name (default: %s)]\n"
            "\t[-f dir (default: %s)] file directory for vdev files\n"
            "\t[-V] verbose (use multiple times for ever more blather)\n"
            "\t[-E] use existing pool instead of creating new one\n"
            "\t[-T time (default: %llu sec)] total run time\n"
            "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
            "\t[-P passtime (default: %llu sec)] time per pass\n"
            "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
            "\t[-h] (print help)\n",
            (u_longlong_t)zo->zo_vdevs,                 /* -v */
            nice_vdev_size,                             /* -s */
            zo->zo_ashift,                              /* -a */
            zo->zo_mirrors,                             /* -m */
            zo->zo_raidz,                               /* -r */
            zo->zo_raidz_parity,                        /* -R */
            zo->zo_datasets,                            /* -d */
            zo->zo_threads,                             /* -t */
            nice_gang_bang,                             /* -g */
            zo->zo_init,                                /* -i */
            (u_longlong_t)zo->zo_killrate,              /* -k */
            zo->zo_pool,                                /* -p */
            zo->zo_dir,                                 /* -f */
            (u_longlong_t)zo->zo_time,                  /* -T */
            (u_longlong_t)zo->zo_maxloops,              /* -F */
            (u_longlong_t)zo->zo_passtime);
        exit(requested ? 0 : 1);
}

static void
process_options(int argc, char **argv)
{
        ztest_shared_opts_t *zo = &ztest_opts;
        int opt;
        uint64_t value;
        char *path;

        char altdir[MAXNAMELEN] = { 0 };

        bcopy(&ztest_opts_defaults, zo, sizeof (*zo));

        while ((opt = getopt(argc, argv,
            "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
                value = 0;
                switch (opt) {
                case 'v':
                case 's':
                case 'a':
                case 'm':
                case 'r':
                case 'R':
                case 'd':
                case 't':
                case 'g':
                case 'i':
                case 'k':
                case 'T':
                case 'P':
                case 'F':
                        value = nicenumtoull(optarg);
                }
                switch (opt) {
                case 'v':
                        zo->zo_vdevs = value;
                        break;
                case 's':
                        zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
                        break;
                case 'a':
                        zo->zo_ashift = value;
                        break;
                case 'm':
                        zo->zo_mirrors = value;
                        break;
                case 'r':
                        zo->zo_raidz = MAX(1, value);
                        break;
                case 'R':
                        zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
                        break;
                case 'd':
                        zo->zo_datasets = MAX(1, value);
                        break;
                case 't':
                        zo->zo_threads = MAX(1, value);
                        break;
                case 'g':
                        zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
                            value);
                        break;
                case 'k':
                        zo->zo_killrate = value;
                        break;
                case 'p':
                        (void) strlcpy(zo->zo_pool, optarg,
                            sizeof (zo->zo_pool));
                        break;
                case 'f':
                        path = realpath(optarg, NULL);
                        if (path == NULL) {
                                (void) fprintf(stderr, "error: %s: %s\n",
                                    optarg, strerror(errno));
                                usage(B_FALSE);
                        } else {
                                (void) strlcpy(zo->zo_dir, path,
                                    sizeof (zo->zo_dir));
                        }
                        break;
                case 'P':
                        zo->zo_passtime = MAX(1, value);
                        break;
                case 'F':
                        zo->zo_maxloops = MAX(1, value);
                        break;
                case 'B':
                        (void) strlcpy(altdir, optarg, sizeof (altdir));
                        break;
                }
        }

        zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);

        zo->zo_vdevtime =
            (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
            UINT64_MAX >> 2);
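
        /*
         * Example: -T 300 -v 5 yields zo_vdevtime = 60 seconds, so the
         * vdev add/remove tests fire about once a minute.
         */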

        if (strlen(altdir) > 0) {
                char *cmd;
                char *realaltdir;
                char *bin;
                char *ztest;
                char *isa;
                int isalen;

                cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
                realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

                VERIFY(NULL != realpath(getexecname(), cmd));
                if (0 != access(altdir, F_OK)) {
                        ztest_dump_core = B_FALSE;
                        fatal(B_TRUE, "invalid alternate ztest path: %s",
                            altdir);
                }
                VERIFY(NULL != realpath(altdir, realaltdir));

                /*
                 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
                 * We want to extract <isa> to determine if we should use
                 * 32 or 64 bit binaries.
                 */
                bin = strstr(cmd, "/usr/bin/");
                ztest = strstr(bin, "/ztest");
                isa = bin + 9;
                isalen = ztest - isa;
                (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
                    "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
                (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
                    "%s/usr/lib/%.*s", realaltdir, isalen, isa);

                if (0 != access(zo->zo_alt_ztest, X_OK)) {
                        ztest_dump_core = B_FALSE;
                        fatal(B_TRUE, "invalid alternate ztest: %s",
                            zo->zo_alt_ztest);
                } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
                        ztest_dump_core = B_FALSE;
                        fatal(B_TRUE, "invalid alternate lib directory %s",
                            zo->zo_alt_libpath);
                }

                umem_free(cmd, MAXPATHLEN);
                umem_free(realaltdir, MAXPATHLEN);
        }
}

static void
ztest_kill(ztest_shared_t *zs)
{
        zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
        zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
        (void) kill(getpid(), SIGKILL);
}

static uint64_t
ztest_random(uint64_t range)
{
        uint64_t r;

        ASSERT3S(ztest_fd_rand, >=, 0);

        if (range == 0)
                return (0);

        if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
                fatal(1, "short read from /dev/urandom");

        return (r % range);
}

static void
ztest_record_enospc(const char *s)
{
        ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
        if (ztest_opts.zo_ashift == 0)
                return (SPA_MINBLOCKSHIFT + ztest_random(3));
        return (ztest_opts.zo_ashift);
}
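
/*
 * Example: with -a 0, each vdev gets a random ashift of 9, 10 or 11,
 * i.e. a sector size of 512, 1024 or 2048 bytes.
 */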

static nvlist_t *
make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
{
        char *pathbuf;
        uint64_t vdev;
        nvlist_t *file;

        pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

        if (ashift == 0)
                ashift = ztest_get_ashift();

        if (path == NULL) {
                path = pathbuf;

                if (aux != NULL) {
                        vdev = ztest_shared->zs_vdev_aux;
                        (void) snprintf(path, MAXPATHLEN,
                            ztest_aux_template, ztest_opts.zo_dir,
                            ztest_opts.zo_pool, aux, vdev);
                } else {
                        vdev = ztest_shared->zs_vdev_next_leaf++;
                        (void) snprintf(path, MAXPATHLEN,
                            ztest_dev_template, ztest_opts.zo_dir,
                            ztest_opts.zo_pool, vdev);
                }
        }

        if (size != 0) {
                int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
                if (fd == -1)
                        fatal(1, "can't open %s", path);
                if (ftruncate(fd, size) != 0)
                        fatal(1, "can't ftruncate %s", path);
                (void) close(fd);
        }

        VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
        VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
        VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
        umem_free(pathbuf, MAXPATHLEN);

        return (file);
}

static nvlist_t *
make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
{
        nvlist_t *raidz, **child;
        int c;

        if (r < 2)
                return (make_vdev_file(path, aux, size, ashift));
        child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

        for (c = 0; c < r; c++)
                child[c] = make_vdev_file(path, aux, size, ashift);

        VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_RAIDZ) == 0);
        VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
            ztest_opts.zo_raidz_parity) == 0);
        VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
            child, r) == 0);

        for (c = 0; c < r; c++)
                nvlist_free(child[c]);

        umem_free(child, r * sizeof (nvlist_t *));

        return (raidz);
}

static nvlist_t *
make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
    int r, int m)
{
        nvlist_t *mirror, **child;
        int c;

        if (m < 1)
                return (make_vdev_raidz(path, aux, size, ashift, r));

        child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

        for (c = 0; c < m; c++)
                child[c] = make_vdev_raidz(path, aux, size, ashift, r);

        VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_MIRROR) == 0);
        VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
            child, m) == 0);

        for (c = 0; c < m; c++)
                nvlist_free(child[c]);

        umem_free(child, m * sizeof (nvlist_t *));

        return (mirror);
}

static nvlist_t *
make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
{
        nvlist_t *root, **child;
        int c;

        ASSERT(t > 0);

        child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

        for (c = 0; c < t; c++) {
                child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
                VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
                    log) == 0);
        }

        VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
        VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
            child, t) == 0);

        for (c = 0; c < t; c++)
                nvlist_free(child[c]);

        umem_free(child, t * sizeof (nvlist_t *));

        return (root);
}

static uint64_t
ztest_random_blocksize(void)
{
        return (1 << (SPA_MINBLOCKSHIFT +
            ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
}

static int
ztest_random_ibshift(void)
{
        return (DN_MIN_INDBLKSHIFT +
            ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
        uint64_t top;
        vdev_t *rvd = spa->spa_root_vdev;
        vdev_t *tvd;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        do {
                top = ztest_random(rvd->vdev_children);
                tvd = rvd->vdev_child[top];
        } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
            tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

        return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
        uint64_t value;

        do {
                value = zfs_prop_random_value(prop, ztest_random(-1ULL));
        } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

        return (value);
}

static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
        const char *propname = zfs_prop_to_name(prop);
        const char *valname;
        char *setpoint;
        uint64_t curval;
        int error;

        error = dsl_prop_set(osname, propname,
            (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
            sizeof (value), 1, &value);

        if (error == ENOSPC) {
                ztest_record_enospc(FTAG);
                return (error);
        }
        ASSERT3U(error, ==, 0);

        setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
        VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
            1, &curval, setpoint), ==, 0);

        if (ztest_opts.zo_verbose >= 6) {
                VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
                (void) printf("%s %s = %s at '%s'\n",
                    osname, propname, valname, setpoint);
        }
        umem_free(setpoint, MAXPATHLEN);

        return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
        spa_t *spa = ztest_spa;
        nvlist_t *props = NULL;
        int error;

        VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

        error = spa_prop_set(spa, props);

        nvlist_free(props);

        if (error == ENOSPC) {
                ztest_record_enospc(FTAG);
                return (error);
        }
        ASSERT3U(error, ==, 0);

        return (error);
}

static void
ztest_rll_init(rll_t *rll)
{
        rll->rll_writer = NULL;
        rll->rll_readers = 0;
        mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}

static void
ztest_rll_destroy(rll_t *rll)
{
        ASSERT(rll->rll_writer == NULL);
        ASSERT(rll->rll_readers == 0);
        mutex_destroy(&rll->rll_lock);
        cv_destroy(&rll->rll_cv);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
        mutex_enter(&rll->rll_lock);

        if (type == RL_READER) {
                while (rll->rll_writer != NULL)
                        (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
                rll->rll_readers++;
        } else {
                while (rll->rll_writer != NULL || rll->rll_readers)
                        (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
                rll->rll_writer = curthread;
        }

        mutex_exit(&rll->rll_lock);
}

static void
ztest_rll_unlock(rll_t *rll)
{
        mutex_enter(&rll->rll_lock);

        if (rll->rll_writer) {
                ASSERT(rll->rll_readers == 0);
                rll->rll_writer = NULL;
        } else {
                ASSERT(rll->rll_readers != 0);
                ASSERT(rll->rll_writer == NULL);
                rll->rll_readers--;
        }

        if (rll->rll_writer == NULL && rll->rll_readers == 0)
                cv_broadcast(&rll->rll_cv);

        mutex_exit(&rll->rll_lock);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
        rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

        ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
        rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

        ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
        uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
        rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
        rl_t *rl;

        rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
        rl->rl_object = object;
        rl->rl_offset = offset;
        rl->rl_size = size;
        rl->rl_lock = rll;

        ztest_rll_lock(rll, type);

        return (rl);
}

static void
ztest_range_unlock(rl_t *rl)
{
        rll_t *rll = rl->rl_lock;

        ztest_rll_unlock(rll);

        umem_free(rl, sizeof (*rl));
}

static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
        int l;

        zd->zd_os = os;
        zd->zd_zilog = dmu_objset_zil(os);
        zd->zd_shared = szd;
        dmu_objset_name(os, zd->zd_name);

        if (zd->zd_shared != NULL)
                zd->zd_shared->zd_seq = 0;

        rw_init(&zd->zd_zilog_lock, NULL, RW_DEFAULT, NULL);
        mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);

        for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
                ztest_rll_init(&zd->zd_object_lock[l]);

        for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
                ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
        int l;

        mutex_destroy(&zd->zd_dirobj_lock);
        rw_destroy(&zd->zd_zilog_lock);

        for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
                ztest_rll_destroy(&zd->zd_object_lock[l]);

        for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
                ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
        uint64_t txg;
        int error;

        /*
         * Attempt to assign tx to some transaction group.
         */
        error = dmu_tx_assign(tx, txg_how);
        if (error) {
                if (error == ERESTART) {
                        ASSERT(txg_how == TXG_NOWAIT);
                        dmu_tx_wait(tx);
                } else {
                        ASSERT3U(error, ==, ENOSPC);
                        ztest_record_enospc(tag);
                }
                dmu_tx_abort(tx);
                return (0);
        }
        txg = dmu_tx_get_txg(tx);
        ASSERT(txg != 0);
        return (txg);
}
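
/*
 * Typical caller pattern for ztest_tx_assign() (a sketch of the idiom
 * used throughout this file):
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, offset, size);
 *      txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
 *      if (txg == 0)
 *              return;         (ENOSPC was recorded; tx was aborted)
 *      ...
 *      dmu_tx_commit(tx);
 */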

static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
        uint64_t *ip = buf;
        uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

        while (ip < ip_end)
                *ip++ = value;
}

static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
        uint64_t *ip = buf;
        uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
        uint64_t diff = 0;

        while (ip < ip_end)
                diff |= (value - *ip++);

        return (diff == 0);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
        bt->bt_magic = BT_MAGIC;
        bt->bt_objset = dmu_objset_id(os);
        bt->bt_object = object;
        bt->bt_offset = offset;
        bt->bt_gen = gen;
        bt->bt_txg = txg;
        bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
{
        ASSERT(bt->bt_magic == BT_MAGIC);
        ASSERT(bt->bt_objset == dmu_objset_id(os));
        ASSERT(bt->bt_object == object);
        ASSERT(bt->bt_offset == offset);
        ASSERT(bt->bt_gen <= gen);
        ASSERT(bt->bt_txg <= txg);
        ASSERT(bt->bt_crtxg == crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
        dmu_object_info_t doi;
        ztest_block_tag_t *bt;

        dmu_object_info_from_db(db, &doi);
        ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
        ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
        bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

        return (bt);
}

#define lrz_type        lr_mode
#define lrz_blocksize   lr_uid
#define lrz_ibshift     lr_gid
#define lrz_bonustype   lr_rdev
#define lrz_bonuslen    lr_crtime[1]
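
/*
 * ztest doesn't log real mode/uid/gid/rdev values, so the macros above
 * borrow those otherwise-unused lr_create_t fields to carry an object's
 * type, block size, indirect block shift, bonus type and bonus length
 * through the ZIL (see ztest_replay_create() below).
 */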

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
        char *name = (void *)(lr + 1);          /* name follows lr */
        size_t namesize = strlen(name) + 1;
        itx_t *itx;

        if (zil_replaying(zd->zd_zilog, tx))
                return;

        itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
        bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
            sizeof (*lr) + namesize - sizeof (lr_t));

        zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
        char *name = (void *)(lr + 1);          /* name follows lr */
        size_t namesize = strlen(name) + 1;
        itx_t *itx;

        if (zil_replaying(zd->zd_zilog, tx))
                return;

        itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
        bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
            sizeof (*lr) + namesize - sizeof (lr_t));

        itx->itx_oid = object;
        zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
        itx_t *itx;
        itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

        if (zil_replaying(zd->zd_zilog, tx))
                return;

        if (lr->lr_length > ZIL_MAX_LOG_DATA)
                write_state = WR_INDIRECT;

        itx = zil_itx_create(TX_WRITE,
            sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

        if (write_state == WR_COPIED &&
            dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
            ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
                zil_itx_destroy(itx);
                itx = zil_itx_create(TX_WRITE, sizeof (*lr));
                write_state = WR_NEED_COPY;
        }
        itx->itx_private = zd;
        itx->itx_wr_state = write_state;
        itx->itx_sync = (ztest_random(8) == 0);
        itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

        bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
            sizeof (*lr) - sizeof (lr_t));

        zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
        itx_t *itx;

        if (zil_replaying(zd->zd_zilog, tx))
                return;

        itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
        bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
            sizeof (*lr) - sizeof (lr_t));

        itx->itx_sync = B_FALSE;
        zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
        itx_t *itx;

        if (zil_replaying(zd->zd_zilog, tx))
                return;

        itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
        bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
            sizeof (*lr) - sizeof (lr_t));

        itx->itx_sync = B_FALSE;
        zil_itx_assign(zd->zd_zilog, itx, tx);
}

static int
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
{
        char *name = (void *)(lr + 1);          /* name follows lr */
        objset_t *os = zd->zd_os;
        ztest_block_tag_t *bbt;
        dmu_buf_t *db;
        dmu_tx_t *tx;
        uint64_t txg;
        int error = 0;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        ASSERT(lr->lr_doid == ZTEST_DIROBJ);
        ASSERT(name[0] != '\0');

        tx = dmu_tx_create(os);

        dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

        if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        } else {
                dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
        }

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
        if (txg == 0)
                return (ENOSPC);

        ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

        if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
                if (lr->lr_foid == 0) {
                        lr->lr_foid = zap_create(os,
                            lr->lrz_type, lr->lrz_bonustype,
                            lr->lrz_bonuslen, tx);
                } else {
                        error = zap_create_claim(os, lr->lr_foid,
                            lr->lrz_type, lr->lrz_bonustype,
                            lr->lrz_bonuslen, tx);
                }
        } else {
                if (lr->lr_foid == 0) {
                        lr->lr_foid = dmu_object_alloc(os,
                            lr->lrz_type, 0, lr->lrz_bonustype,
                            lr->lrz_bonuslen, tx);
                } else {
                        error = dmu_object_claim(os, lr->lr_foid,
                            lr->lrz_type, 0, lr->lrz_bonustype,
                            lr->lrz_bonuslen, tx);
                }
        }

        if (error) {
                ASSERT3U(error, ==, EEXIST);
                ASSERT(zd->zd_zilog->zl_replay);
                dmu_tx_commit(tx);
                return (error);
        }

        ASSERT(lr->lr_foid != 0);

        if (lr->lrz_type != DMU_OT_ZAP_OTHER)
                VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
                    lr->lrz_blocksize, lr->lrz_ibshift, tx));

        VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
        bbt = ztest_bt_bonus(db);
        dmu_buf_will_dirty(db, tx);
        ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
        dmu_buf_rele(db, FTAG);

        VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
            &lr->lr_foid, tx));

        (void) ztest_log_create(zd, tx, lr);

        dmu_tx_commit(tx);

        return (0);
}

static int
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
{
        char *name = (void *)(lr + 1);          /* name follows lr */
        objset_t *os = zd->zd_os;
        dmu_object_info_t doi;
        dmu_tx_t *tx;
        uint64_t object, txg;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        ASSERT(lr->lr_doid == ZTEST_DIROBJ);
        ASSERT(name[0] != '\0');

        VERIFY3U(0, ==,
            zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
        ASSERT(object != 0);

        ztest_object_lock(zd, object, RL_WRITER);

        VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

        tx = dmu_tx_create(os);

        dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
        dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
        if (txg == 0) {
                ztest_object_unlock(zd, object);
                return (ENOSPC);
        }

        if (doi.doi_type == DMU_OT_ZAP_OTHER) {
                VERIFY3U(0, ==, zap_destroy(os, object, tx));
        } else {
                VERIFY3U(0, ==, dmu_object_free(os, object, tx));
        }

        VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

        (void) ztest_log_remove(zd, tx, lr, object);

        dmu_tx_commit(tx);

        ztest_object_unlock(zd, object);

        return (0);
}

static int
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
{
        objset_t *os = zd->zd_os;
        void *data = lr + 1;                    /* data follows lr */
        uint64_t offset, length;
        ztest_block_tag_t *bt = data;
        ztest_block_tag_t *bbt;
        uint64_t gen, txg, lrtxg, crtxg;
        dmu_object_info_t doi;
        dmu_tx_t *tx;
        dmu_buf_t *db;
        arc_buf_t *abuf = NULL;
        rl_t *rl;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        offset = lr->lr_offset;
        length = lr->lr_length;

        /* If it's a dmu_sync() block, write the whole block */
        if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
                uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
                if (length < blocksize) {
                        offset -= offset % blocksize;
                        length = blocksize;
                }
        }

        if (bt->bt_magic == BSWAP_64(BT_MAGIC))
                byteswap_uint64_array(bt, sizeof (*bt));

        if (bt->bt_magic != BT_MAGIC)
                bt = NULL;

        ztest_object_lock(zd, lr->lr_foid, RL_READER);
        rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

        VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

        dmu_object_info_from_db(db, &doi);

        bbt = ztest_bt_bonus(db);
        ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
        gen = bbt->bt_gen;
        crtxg = bbt->bt_crtxg;
        lrtxg = lr->lr_common.lrc_txg;

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

        if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
            P2PHASE(offset, length) == 0)
                abuf = dmu_request_arcbuf(db, length);

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
        if (txg == 0) {
                if (abuf != NULL)
                        dmu_return_arcbuf(abuf);
                dmu_buf_rele(db, FTAG);
                ztest_range_unlock(rl);
                ztest_object_unlock(zd, lr->lr_foid);
                return (ENOSPC);
        }

        if (bt != NULL) {
                /*
                 * Usually, verify the old data before writing new data --
                 * but not always, because we also want to verify correct
                 * behavior when the data was not recently read into cache.
                 */
                ASSERT(offset % doi.doi_data_block_size == 0);
                if (ztest_random(4) != 0) {
                        int prefetch = ztest_random(2) ?
                            DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
                        ztest_block_tag_t rbt;

                        VERIFY(dmu_read(os, lr->lr_foid, offset,
                            sizeof (rbt), &rbt, prefetch) == 0);
                        if (rbt.bt_magic == BT_MAGIC) {
                                ztest_bt_verify(&rbt, os, lr->lr_foid,
                                    offset, gen, txg, crtxg);
                        }
                }

                /*
                 * Writes can appear to be newer than the bonus buffer because
                 * the ztest_get_data() callback does a dmu_read() of the
                 * open-context data, which may be different than the data
                 * as it was when the write was generated.
                 */
                if (zd->zd_zilog->zl_replay) {
                        ztest_bt_verify(bt, os, lr->lr_foid, offset,
                            MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
                            bt->bt_crtxg);
                }

                /*
                 * Set the bt's gen/txg to the bonus buffer's gen/txg
                 * so that all of the usual ASSERTs will work.
                 */
                ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
        }

        if (abuf == NULL) {
                dmu_write(os, lr->lr_foid, offset, length, data, tx);
        } else {
                bcopy(data, abuf->b_data, length);
                dmu_assign_arcbuf(db, offset, abuf, tx);
        }

        (void) ztest_log_write(zd, tx, lr);

        dmu_buf_rele(db, FTAG);

        dmu_tx_commit(tx);

        ztest_range_unlock(rl);
        ztest_object_unlock(zd, lr->lr_foid);

        return (0);
}

static int
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
{
        objset_t *os = zd->zd_os;
        dmu_tx_t *tx;
        uint64_t txg;
        rl_t *rl;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        ztest_object_lock(zd, lr->lr_foid, RL_READER);
        rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
            RL_WRITER);

        tx = dmu_tx_create(os);

        dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
        if (txg == 0) {
                ztest_range_unlock(rl);
                ztest_object_unlock(zd, lr->lr_foid);
                return (ENOSPC);
        }

        VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
            lr->lr_length, tx) == 0);

        (void) ztest_log_truncate(zd, tx, lr);

        dmu_tx_commit(tx);

        ztest_range_unlock(rl);
        ztest_object_unlock(zd, lr->lr_foid);

        return (0);
}

static int
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
{
        objset_t *os = zd->zd_os;
        dmu_tx_t *tx;
        dmu_buf_t *db;
        ztest_block_tag_t *bbt;
        uint64_t txg, lrtxg, crtxg;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

        VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, lr->lr_foid);

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
        if (txg == 0) {
                dmu_buf_rele(db, FTAG);
                ztest_object_unlock(zd, lr->lr_foid);
                return (ENOSPC);
        }

        bbt = ztest_bt_bonus(db);
        ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
        crtxg = bbt->bt_crtxg;
        lrtxg = lr->lr_common.lrc_txg;

        if (zd->zd_zilog->zl_replay) {
                ASSERT(lr->lr_size != 0);
                ASSERT(lr->lr_mode != 0);
                ASSERT(lrtxg != 0);
        } else {
                /*
                 * Randomly change the size and increment the generation.
                 */
                lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
                    sizeof (*bbt);
                lr->lr_mode = bbt->bt_gen + 1;
                ASSERT(lrtxg == 0);
        }

        /*
         * Verify that the current bonus buffer is not newer than our txg.
         */
        ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
            MAX(txg, lrtxg), crtxg);

        dmu_buf_will_dirty(db, tx);

        ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
        ASSERT3U(lr->lr_size, <=, db->db_size);
        VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
        bbt = ztest_bt_bonus(db);

        ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

        dmu_buf_rele(db, FTAG);

        (void) ztest_log_setattr(zd, tx, lr);

        dmu_tx_commit(tx);

        ztest_object_unlock(zd, lr->lr_foid);

        return (0);
}

zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
        NULL,                                           /* 0 no such transaction type */
        (zil_replay_func_t *)ztest_replay_create,       /* TX_CREATE */
        NULL,                                           /* TX_MKDIR */
        NULL,                                           /* TX_MKXATTR */
        NULL,                                           /* TX_SYMLINK */
        (zil_replay_func_t *)ztest_replay_remove,       /* TX_REMOVE */
        NULL,                                           /* TX_RMDIR */
        NULL,                                           /* TX_LINK */
        NULL,                                           /* TX_RENAME */
        (zil_replay_func_t *)ztest_replay_write,        /* TX_WRITE */
        (zil_replay_func_t *)ztest_replay_truncate,     /* TX_TRUNCATE */
        (zil_replay_func_t *)ztest_replay_setattr,      /* TX_SETATTR */
        NULL,                                           /* TX_ACL */
        NULL,                                           /* TX_CREATE_ACL */
        NULL,                                           /* TX_CREATE_ATTR */
        NULL,                                           /* TX_CREATE_ACL_ATTR */
        NULL,                                           /* TX_MKDIR_ACL */
        NULL,                                           /* TX_MKDIR_ATTR */
        NULL,                                           /* TX_MKDIR_ACL_ATTR */
        NULL,                                           /* TX_WRITE2 */
};

/*
 * ZIL get_data callbacks
 */

static void
ztest_get_done(zgd_t *zgd, int error)
{
        ztest_ds_t *zd = zgd->zgd_private;
        uint64_t object = zgd->zgd_rl->rl_object;

        if (zgd->zgd_db)
                dmu_buf_rele(zgd->zgd_db, zgd);

        ztest_range_unlock(zgd->zgd_rl);
        ztest_object_unlock(zd, object);

        if (error == 0 && zgd->zgd_bp)
                zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

        umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
        ztest_ds_t *zd = arg;
        objset_t *os = zd->zd_os;
        uint64_t object = lr->lr_foid;
        uint64_t offset = lr->lr_offset;
        uint64_t size = lr->lr_length;
        blkptr_t *bp = &lr->lr_blkptr;
        uint64_t txg = lr->lr_common.lrc_txg;
        uint64_t crtxg;
        dmu_object_info_t doi;
        dmu_buf_t *db;
        zgd_t *zgd;
        int error;

        ztest_object_lock(zd, object, RL_READER);
        error = dmu_bonus_hold(os, object, FTAG, &db);
        if (error) {
                ztest_object_unlock(zd, object);
                return (error);
        }

        crtxg = ztest_bt_bonus(db)->bt_crtxg;

        if (crtxg == 0 || crtxg > txg) {
                dmu_buf_rele(db, FTAG);
                ztest_object_unlock(zd, object);
                return (ENOENT);
        }

        dmu_object_info_from_db(db, &doi);
        dmu_buf_rele(db, FTAG);
        db = NULL;

        zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
        zgd->zgd_zilog = zd->zd_zilog;
        zgd->zgd_private = zd;

        if (buf != NULL) {                      /* immediate write */
                zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
                    RL_READER);

                error = dmu_read(os, object, offset, size, buf,
                    DMU_READ_NO_PREFETCH);
                ASSERT(error == 0);
        } else {
                size = doi.doi_data_block_size;
                if (ISP2(size)) {
                        offset = P2ALIGN(offset, size);
                } else {
                        ASSERT(offset < size);
                        offset = 0;
                }

                zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
                    RL_READER);

                error = dmu_buf_hold(os, object, offset, zgd, &db,
                    DMU_READ_NO_PREFETCH);

                if (error == 0) {
                        zgd->zgd_db = db;
                        zgd->zgd_bp = bp;

                        ASSERT(db->db_offset == offset);
                        ASSERT(db->db_size == size);

                        error = dmu_sync(zio, lr->lr_common.lrc_txg,
                            ztest_get_done, zgd);

                        if (error == 0)
                                return (0);
                }
        }

        ztest_get_done(zgd, error);

        return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
        char *lr;
        size_t namesize = name ? strlen(name) + 1 : 0;

        lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

        if (name)
                bcopy(name, lr + lrsize, namesize);

        return (lr);
}

static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
        size_t namesize = name ? strlen(name) + 1 : 0;

        umem_free(lr, lrsize + namesize);
}

/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
        int missing = 0;
        int error;
        int i;

        ASSERT(mutex_held(&zd->zd_dirobj_lock));

        for (i = 0; i < count; i++, od++) {
                od->od_object = 0;
                error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
                    sizeof (uint64_t), 1, &od->od_object);
                if (error) {
                        ASSERT(error == ENOENT);
                        ASSERT(od->od_object == 0);
                        missing++;
                } else {
                        dmu_buf_t *db;
                        ztest_block_tag_t *bbt;
                        dmu_object_info_t doi;

                        ASSERT(od->od_object != 0);
                        ASSERT(missing == 0);   /* there should be no gaps */

                        ztest_object_lock(zd, od->od_object, RL_READER);
                        VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
                            od->od_object, FTAG, &db));
                        dmu_object_info_from_db(db, &doi);
                        bbt = ztest_bt_bonus(db);
                        ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
                        od->od_type = doi.doi_type;
                        od->od_blocksize = doi.doi_data_block_size;
                        od->od_gen = bbt->bt_gen;
                        dmu_buf_rele(db, FTAG);
                        ztest_object_unlock(zd, od->od_object);
                }
        }

        return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
        int missing = 0;
        int i;

        ASSERT(mutex_held(&zd->zd_dirobj_lock));

        for (i = 0; i < count; i++, od++) {
                if (missing) {
                        od->od_object = 0;
                        missing++;
                        continue;
                }

                lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

                lr->lr_doid = od->od_dir;
                lr->lr_foid = 0;        /* 0 to allocate, > 0 to claim */
                lr->lrz_type = od->od_crtype;
                lr->lrz_blocksize = od->od_crblocksize;
                lr->lrz_ibshift = ztest_random_ibshift();
                lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
                lr->lrz_bonuslen = dmu_bonus_max();
                lr->lr_gen = od->od_crgen;
                lr->lr_crtime[0] = time(NULL);

                if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
                        ASSERT(missing == 0);
                        missing++;
                } else {
                        od->od_object = lr->lr_foid;
                        od->od_type = od->od_crtype;
                        od->od_blocksize = od->od_crblocksize;
                        od->od_gen = od->od_crgen;
                        ASSERT(od->od_object != 0);
                }

                ztest_lr_free(lr, sizeof (*lr), od->od_name);
        }

        return (missing);
}

static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
        int missing = 0;
        int error;
        int i;

        ASSERT(mutex_held(&zd->zd_dirobj_lock));

        od += count - 1;

        for (i = count - 1; i >= 0; i--, od--) {
                if (missing) {
                        missing++;
                        continue;
                }

                if (od->od_object == 0)
                        continue;

                lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

                lr->lr_doid = od->od_dir;

                if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
                        ASSERT3U(error, ==, ENOSPC);
                        missing++;
                } else {
                        od->od_object = 0;
                }
                ztest_lr_free(lr, sizeof (*lr), od->od_name);
        }

        return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
        lr_write_t *lr;
        int error;

        lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

        lr->lr_foid = object;
        lr->lr_offset = offset;
        lr->lr_length = size;
        BP_ZERO(&lr->lr_blkptr);

        bcopy(data, lr + 1, size);

        error = ztest_replay_write(zd, lr, B_FALSE);

        ztest_lr_free(lr, sizeof (*lr) + size, NULL);

        return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
        lr_truncate_t *lr;
        int error;

        lr = ztest_lr_alloc(sizeof (*lr), NULL);

        lr->lr_foid = object;
        lr->lr_offset = offset;
        lr->lr_length = size;

        error = ztest_replay_truncate(zd, lr, B_FALSE);

        ztest_lr_free(lr, sizeof (*lr), NULL);

        return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
        lr_setattr_t *lr;
        int error;

        lr = ztest_lr_alloc(sizeof (*lr), NULL);

        lr->lr_foid = object;
        lr->lr_size = 0;
        lr->lr_mode = 0;

        error = ztest_replay_setattr(zd, lr, B_FALSE);

        ztest_lr_free(lr, sizeof (*lr), NULL);

        return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
        objset_t *os = zd->zd_os;
        dmu_tx_t *tx;
        uint64_t txg;
        rl_t *rl;

        txg_wait_synced(dmu_objset_pool(os), 0);

        ztest_object_lock(zd, object, RL_READER);
        rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, object, offset, size);

        txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

        if (txg != 0) {
                dmu_prealloc(os, object, offset, size, tx);
                dmu_tx_commit(tx);
                txg_wait_synced(dmu_objset_pool(os), txg);
        } else {
                (void) dmu_free_long_range(os, object, offset, size);
        }

        ztest_range_unlock(rl);
        ztest_object_unlock(zd, object);
}

static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
        ztest_block_tag_t wbt;
        dmu_object_info_t doi;
        enum ztest_io_type io_type;
        uint64_t blocksize;
        void *data;

        VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
        blocksize = doi.doi_data_block_size;
        data = umem_alloc(blocksize, UMEM_NOFAIL);

        /*
         * Pick an i/o type at random, biased toward writing block tags.
         */
        io_type = ztest_random(ZTEST_IO_TYPES);
        if (ztest_random(2) == 0)
                io_type = ZTEST_IO_WRITE_TAG;

        (void) rw_enter(&zd->zd_zilog_lock, RW_READER);

        switch (io_type) {

        case ZTEST_IO_WRITE_TAG:
                ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
                (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
                break;

        case ZTEST_IO_WRITE_PATTERN:
                (void) memset(data, 'a' + (object + offset) % 5, blocksize);
                if (ztest_random(2) == 0) {
                        /*
                         * Induce fletcher2 collisions to ensure that
                         * zio_ddt_collision() detects and resolves them
                         * when using fletcher2-verify for deduplication.
                         */
                        ((uint64_t *)data)[0] ^= 1ULL << 63;
                        ((uint64_t *)data)[4] ^= 1ULL << 63;
                }
                (void) ztest_write(zd, object, offset, blocksize, data);
                break;

        case ZTEST_IO_WRITE_ZEROES:
                bzero(data, blocksize);
                (void) ztest_write(zd, object, offset, blocksize, data);
                break;

        case ZTEST_IO_TRUNCATE:
                (void) ztest_truncate(zd, object, offset, blocksize);
                break;

        case ZTEST_IO_SETATTR:
                (void) ztest_setattr(zd, object);
                break;
        }

        (void) rw_exit(&zd->zd_zilog_lock);

        umem_free(data, blocksize);
}

/*
 * Initialize an object description template.
 */
static void
ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
    dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
{
        od->od_dir = ZTEST_DIROBJ;
        od->od_object = 0;

        od->od_crtype = type;
        od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
        od->od_crgen = gen;

        od->od_type = DMU_OT_NONE;
        od->od_blocksize = 0;
        od->od_gen = 0;

        (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
            tag, (longlong_t)id, (u_longlong_t)index);
}

/*
 * Lookup or create the objects for a test using the od template.
 * If the objects do not all exist, or if 'remove' is specified,
 * remove any existing objects and create new ones.  Otherwise,
 * use the existing objects.
 */
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
        int count = size / sizeof (*od);
        int rv = 0;

        mutex_enter(&zd->zd_dirobj_lock);
        if ((ztest_lookup(zd, od, count) != 0 || remove) &&
            (ztest_remove(zd, od, count) != 0 ||
            ztest_create(zd, od, count) != 0))
                rv = -1;
        mutex_exit(&zd->zd_dirobj_lock);

        return (rv);
}

void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
        zilog_t *zilog = zd->zd_zilog;

        (void) rw_enter(&zd->zd_zilog_lock, RW_READER);

        zil_commit(zilog, ztest_random(ZTEST_OBJECTS));

        /*
         * Remember the committed values in zd, which is in parent/child
         * shared memory.  If we die, the next iteration of ztest_run()
         * will verify that the log really does contain this record.
         */
        mutex_enter(&zilog->zl_lock);
        ASSERT(zd->zd_shared != NULL);
        ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
        zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
        mutex_exit(&zilog->zl_lock);

        (void) rw_exit(&zd->zd_zilog_lock);
}

/*
 * This function is designed to simulate the operations that occur during a
 * mount/unmount operation.  We hold the dataset across these operations in an
 * attempt to expose any implicit assumptions about ZIL management.
 */
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
        objset_t *os = zd->zd_os;

        mutex_enter(&zd->zd_dirobj_lock);
        (void) rw_enter(&zd->zd_zilog_lock, RW_WRITER);

        /* zfs_sb_teardown() */
        zil_close(zd->zd_zilog);

        /* zfsvfs_setup() */
        VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
        zil_replay(os, zd, ztest_replay_vector);

        (void) rw_exit(&zd->zd_zilog_lock);
        mutex_exit(&zd->zd_dirobj_lock);
}

/*
 * Verify that we can't destroy an active pool, create an existing pool,
 * or create a pool with a bad vdev spec.
 */
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_opts_t *zo = &ztest_opts;
        spa_t *spa;
        nvlist_t *nvroot;

        /*
         * Attempt to create using a bad file.
         */
        nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
        VERIFY3U(ENOENT, ==,
            spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
        nvlist_free(nvroot);

        /*
         * Attempt to create using a bad mirror.
         */
        nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
        VERIFY3U(ENOENT, ==,
            spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
        nvlist_free(nvroot);

        /*
         * Attempt to create an existing pool.  It shouldn't matter
         * what's in the nvroot; we should fail with EEXIST.
         */
        (void) rw_enter(&ztest_name_lock, RW_READER);
        nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
        VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
        nvlist_free(nvroot);
        VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
        VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
        spa_close(spa, FTAG);

        (void) rw_exit(&ztest_name_lock);
}

static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
        vdev_t *mvd;
        int c;

        if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
                return (vd);

        for (c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
                    NULL)
                        return (mvd);

        return (NULL);
}

/*
 * Find the first available hole which can be used as a top-level.
 */
static int
find_vdev_hole(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;
        int c;

        ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);

        for (c = 0; c < rvd->vdev_children; c++) {
                vdev_t *cvd = rvd->vdev_child[c];

                if (cvd->vdev_ishole)
                        break;
        }
        return (c);
}

/*
 * Verify that vdev_add() works as expected.
 */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_t *zs = ztest_shared;
        spa_t *spa = ztest_spa;
        uint64_t leaves;
        uint64_t guid;
        nvlist_t *nvroot;
        int error;

        mutex_enter(&ztest_vdev_lock);
        leaves =
            MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;

        /*
         * If we have slogs then remove them 1/4 of the time.
         */
        if (spa_has_slogs(spa) && ztest_random(4) == 0) {
                /*
                 * Grab the guid from the head of the log class rotor.
                 */
                guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;

                spa_config_exit(spa, SCL_VDEV, FTAG);

                /*
                 * We have to grab the zs_name_lock as writer to
                 * prevent a race between removing a slog (dmu_objset_find)
                 * and destroying a dataset.  Removing the slog will
                 * grab a reference on the dataset which may cause
                 * dmu_objset_destroy() to fail with EBUSY thus
                 * leaving the dataset in an inconsistent state.
                 */
                rw_enter(&ztest_name_lock, RW_WRITER);
                error = spa_vdev_remove(spa, guid, B_FALSE);
                rw_exit(&ztest_name_lock);

                if (error && error != EEXIST)
                        fatal(0, "spa_vdev_remove() = %d", error);
        } else {
                spa_config_exit(spa, SCL_VDEV, FTAG);

                /*
                 * Make 1/4 of the devices be log devices.
                 */
                nvroot = make_vdev_root(NULL, NULL,
                    ztest_opts.zo_vdev_size, 0,
                    ztest_random(4) == 0, ztest_opts.zo_raidz,
                    zs->zs_mirrors, 1);

                error = spa_vdev_add(spa, nvroot);
                nvlist_free(nvroot);

                if (error == ENOSPC)
                        ztest_record_enospc("spa_vdev_add");
                else if (error != 0)
                        fatal(0, "spa_vdev_add() = %d", error);
        }

        mutex_exit(&ztest_vdev_lock);
}

/*
 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
 */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_t *zs = ztest_shared;
        spa_t *spa = ztest_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        spa_aux_vdev_t *sav;
        char *aux;
        char *path;
        uint64_t guid = 0;
        int error;

        path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

        if (ztest_random(2) == 0) {
                sav = &spa->spa_spares;
                aux = ZPOOL_CONFIG_SPARES;
        } else {
                sav = &spa->spa_l2cache;
                aux = ZPOOL_CONFIG_L2CACHE;
        }

        mutex_enter(&ztest_vdev_lock);

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        if (sav->sav_count != 0 && ztest_random(4) == 0) {
                /*
                 * Pick a random device to remove.
                 */
                guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
        } else {
                /*
                 * Find an unused device we can add.
                 */
                zs->zs_vdev_aux = 0;
                for (;;) {
                        int c;
                        (void) snprintf(path, sizeof (path), ztest_aux_template,
                            ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
                            zs->zs_vdev_aux);
                        for (c = 0; c < sav->sav_count; c++)
                                if (strcmp(sav->sav_vdevs[c]->vdev_path,
                                    path) == 0)
                                        break;
                        if (c == sav->sav_count &&
                            vdev_lookup_by_path(rvd, path) == NULL)
                                break;
                        zs->zs_vdev_aux++;
                }
        }

        spa_config_exit(spa, SCL_VDEV, FTAG);

        if (guid == 0) {
                /*
                 * Add a new device.
                 */
                nvlist_t *nvroot = make_vdev_root(NULL, aux,
                    (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
                error = spa_vdev_add(spa, nvroot);
                if (error != 0)
                        fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
                nvlist_free(nvroot);
        } else {
                /*
                 * Remove an existing device.  Sometimes, dirty its
                 * vdev state first to make sure we handle removal
                 * of devices that have pending state changes.
                 */
                if (ztest_random(2) == 0)
                        (void) vdev_online(spa, guid, 0, NULL);

                error = spa_vdev_remove(spa, guid, B_FALSE);
                if (error != 0 && error != EBUSY)
                        fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
        }

        mutex_exit(&ztest_vdev_lock);

        umem_free(path, MAXPATHLEN);
}
2532 * split a pool if it has mirror tlvdevs
2536 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2538 ztest_shared_t *zs = ztest_shared;
2539 spa_t *spa = ztest_spa;
2540 vdev_t *rvd = spa->spa_root_vdev;
2541 nvlist_t *tree, **child, *config, *split, **schild;
2542 uint_t c, children, schildren = 0, lastlogid = 0;
2545 mutex_enter(&ztest_vdev_lock);
2547 /* ensure we have a useable config; mirrors of raidz aren't supported */
2548 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2549 mutex_exit(&ztest_vdev_lock);
2553 /* clean up the old pool, if any */
2554 (void) spa_destroy("splitp");
2556 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2558 /* generate a config from the existing config */
2559 mutex_enter(&spa->spa_props_lock);
2560 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2562 mutex_exit(&spa->spa_props_lock);
2564 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2567 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2568 for (c = 0; c < children; c++) {
2569 vdev_t *tvd = rvd->vdev_child[c];
2573 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2574 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2576 VERIFY(nvlist_add_string(schild[schildren],
2577 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2578 VERIFY(nvlist_add_uint64(schild[schildren],
2579 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2581 lastlogid = schildren;
2586 VERIFY(nvlist_lookup_nvlist_array(child[c],
2587 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2588 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2591 /* OK, create a config that can be used to split */
2592 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2593 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2594 VDEV_TYPE_ROOT) == 0);
2595 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2596 lastlogid != 0 ? lastlogid : schildren) == 0);
2598 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2599 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2601 for (c = 0; c < schildren; c++)
2602 nvlist_free(schild[c]);
2606 spa_config_exit(spa, SCL_VDEV, FTAG);
2608 (void) rw_enter(&ztest_name_lock, RW_WRITER);
2609 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2610 (void) rw_exit(&ztest_name_lock);
2612 nvlist_free(config);
2615 (void) printf("successful split - results:\n");
2616 mutex_enter(&spa_namespace_lock);
2617 show_pool_stats(spa);
2618 show_pool_stats(spa_lookup("splitp"));
2619 mutex_exit(&spa_namespace_lock);
2623 mutex_exit(&ztest_vdev_lock);
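/*
 * Hypothetical helper (illustrative only): the nvlist shape that
 * spa_vdev_split_mirror() is handed above -- a root vdev whose
 * children are the halves being split off, wrapped under
 * ZPOOL_CONFIG_VDEV_TREE.
 */
static nvlist_t *
ztest_make_split_config(nvlist_t **schild, uint_t schildren)
{
	nvlist_t *split, *config;

	VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN,
	    schild, schildren) == 0);

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    split) == 0);
	nvlist_free(split);	/* nvlist_add_nvlist() copies its value */

	return (config);
}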
2628 * Verify that we can attach and detach devices.
2632 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2634 ztest_shared_t *zs = ztest_shared;
2635 spa_t *spa = ztest_spa;
2636 spa_aux_vdev_t *sav = &spa->spa_spares;
2637 vdev_t *rvd = spa->spa_root_vdev;
2638 vdev_t *oldvd, *newvd, *pvd;
2642 uint64_t ashift = ztest_get_ashift();
2643 uint64_t oldguid, pguid;
2644 size_t oldsize, newsize;
2645 char *oldpath, *newpath;
2647 int oldvd_has_siblings = B_FALSE;
2648 int newvd_is_spare = B_FALSE;
2650 int error, expected_error;
2652 oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
2653 newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
2655 mutex_enter(&ztest_vdev_lock);
2656 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2658 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2661 * Decide whether to do an attach or a replace.
2663 replacing = ztest_random(2);
2666 * Pick a random top-level vdev.
2668 top = ztest_random_vdev_top(spa, B_TRUE);
2671 * Pick a random leaf within it.
2673 leaf = ztest_random(leaves);
2678 oldvd = rvd->vdev_child[top];
2679 if (zs->zs_mirrors >= 1) {
2680 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2681 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2682 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2684 if (ztest_opts.zo_raidz > 1) {
2685 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2686 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2687 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2691 * If we're already doing an attach or replace, oldvd may be a
2692 * mirror vdev -- in which case, pick a random child.
2694 while (oldvd->vdev_children != 0) {
2695 oldvd_has_siblings = B_TRUE;
2696 ASSERT(oldvd->vdev_children >= 2);
2697 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2700 oldguid = oldvd->vdev_guid;
2701 oldsize = vdev_get_min_asize(oldvd);
2702 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2703 (void) strcpy(oldpath, oldvd->vdev_path);
2704 pvd = oldvd->vdev_parent;
2705 pguid = pvd->vdev_guid;
2708 * If oldvd has siblings, then half of the time, detach it.
2710 if (oldvd_has_siblings && ztest_random(2) == 0) {
2711 spa_config_exit(spa, SCL_VDEV, FTAG);
2712 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2713 if (error != 0 && error != ENODEV && error != EBUSY &&
2715 fatal(0, "detach (%s) returned %d", oldpath, error);
2720 * For the new vdev, choose with equal probability between the two
2721 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2723 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2724 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2725 newvd_is_spare = B_TRUE;
2726 (void) strcpy(newpath, newvd->vdev_path);
2728 (void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
2729 ztest_opts.zo_dir, ztest_opts.zo_pool,
2730 top * leaves + leaf);
2731 if (ztest_random(2) == 0)
2732 newpath[strlen(newpath) - 1] = 'b';
2733 newvd = vdev_lookup_by_path(rvd, newpath);
2737 newsize = vdev_get_min_asize(newvd);
2740 * Make newsize a little bigger or smaller than oldsize.
2741 * If it's smaller, the attach should fail.
2742 * If it's larger, and we're doing a replace,
2743 * we should get dynamic LUN growth when we're done.
2745 newsize = 10 * oldsize / (9 + ztest_random(3));
2749 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2750 * unless it's a replace; in that case any non-replacing parent is OK.
2752 * If newvd is already part of the pool, it should fail with EBUSY.
2754 * If newvd is too small, it should fail with EOVERFLOW.
2756 if (pvd->vdev_ops != &vdev_mirror_ops &&
2757 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2758 pvd->vdev_ops == &vdev_replacing_ops ||
2759 pvd->vdev_ops == &vdev_spare_ops))
2760 expected_error = ENOTSUP;
2761 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2762 expected_error = ENOTSUP;
2763 else if (newvd == oldvd)
2764 expected_error = replacing ? 0 : EBUSY;
2765 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2766 expected_error = EBUSY;
2767 else if (newsize < oldsize)
2768 expected_error = EOVERFLOW;
2769 else if (ashift > oldvd->vdev_top->vdev_ashift)
2770 expected_error = EDOM;
2774 spa_config_exit(spa, SCL_VDEV, FTAG);
2777 * Build the nvlist describing newpath.
2779 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
2780 ashift, 0, 0, 0, 1);
2782 error = spa_vdev_attach(spa, oldguid, root, replacing);
2787 * If our parent was the replacing vdev, but the replace completed,
2788 * then instead of failing with ENOTSUP we may either succeed,
2789 * fail with ENODEV, or fail with EOVERFLOW.
2791 if (expected_error == ENOTSUP &&
2792 (error == 0 || error == ENODEV || error == EOVERFLOW))
2793 expected_error = error;
2796 * If someone grew the LUN, the replacement may be too small.
2798 if (error == EOVERFLOW || error == EBUSY)
2799 expected_error = error;
2801 /* XXX workaround 6690467 */
2802 if (error != expected_error && expected_error != EBUSY) {
2803 fatal(0, "attach (%s %llu, %s %llu, %d) "
2804 "returned %d, expected %d",
2805 oldpath, (longlong_t)oldsize, newpath,
2806 (longlong_t)newsize, replacing, error, expected_error);
2809 mutex_exit(&ztest_vdev_lock);
2811 umem_free(oldpath, MAXPATHLEN);
2812 umem_free(newpath, MAXPATHLEN);
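/*
 * The expected-error classification used above, restated as a
 * hypothetical predicate (illustrative; the EBUSY and EDOM cases,
 * which depend on paths and ashift, are omitted for brevity).
 */
static int
ztest_attach_expected_error(vdev_t *pvd, boolean_t replacing,
    boolean_t newvd_is_spare, boolean_t oldvd_is_log,
    uint64_t oldsize, uint64_t newsize)
{
	if (pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_root_ops && (!replacing ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_spare_ops))
		return (ENOTSUP);	/* parent can't accept new children */
	if (newvd_is_spare && (!replacing || oldvd_is_log))
		return (ENOTSUP);	/* spares only replace data vdevs */
	if (newsize < oldsize)
		return (EOVERFLOW);	/* new device is too small */
	return (0);
}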
2816 * Callback function which expands the physical size of the vdev.
2819 grow_vdev(vdev_t *vd, void *arg)
2821 ASSERTV(spa_t *spa = vd->vdev_spa);
2822 size_t *newsize = arg;
2826 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2827 ASSERT(vd->vdev_ops->vdev_op_leaf);
2829 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2832 fsize = lseek(fd, 0, SEEK_END);
2833 VERIFY(ftruncate(fd, *newsize) == 0);
2835 if (ztest_opts.zo_verbose >= 6) {
2836 (void) printf("%s grew from %lu to %lu bytes\n",
2837 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2844 * Callback function which expands a given vdev by calling vdev_online().
2848 online_vdev(vdev_t *vd, void *arg)
2850 spa_t *spa = vd->vdev_spa;
2851 vdev_t *tvd = vd->vdev_top;
2852 uint64_t guid = vd->vdev_guid;
2853 uint64_t generation = spa->spa_config_generation + 1;
2854 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2857 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2858 ASSERT(vd->vdev_ops->vdev_op_leaf);
2860 /* Calling vdev_online will initialize the new metaslabs */
2861 spa_config_exit(spa, SCL_STATE, spa);
2862 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2863 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2866 * If vdev_online returned an error or the underlying vdev_open
2867 * failed, then we abort the expand. The only way to know that
2868 * vdev_open failed is by checking the returned newstate.
2870 if (error || newstate != VDEV_STATE_HEALTHY) {
2871 if (ztest_opts.zo_verbose >= 5) {
2872 (void) printf("Unable to expand vdev, state %llu, "
2873 "error %d\n", (u_longlong_t)newstate, error);
2877 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2880 * Since we dropped the lock we need to ensure that we're
2881 * still talking to the original vdev. It's possible this
2882 * vdev may have been detached/replaced while we were
2883 * trying to online it.
2885 if (generation != spa->spa_config_generation) {
2886 if (ztest_opts.zo_verbose >= 5) {
2887 (void) printf("vdev configuration has changed, "
2888 "guid %llu, state %llu, expected gen %llu, "
2891 (u_longlong_t)tvd->vdev_state,
2892 (u_longlong_t)generation,
2893 (u_longlong_t)spa->spa_config_generation);
2901 * Traverse the vdev tree calling the supplied function.
2902 * We continue to walk the tree until we either have walked all
2903 * children or we receive a non-NULL return from the callback.
2904 * If a NULL callback is passed, then we just return the first
2905 * leaf vdev we encounter.
2908 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2912 if (vd->vdev_ops->vdev_op_leaf) {
2916 return (func(vd, arg));
2919 for (c = 0; c < vd->vdev_children; c++) {
2920 vdev_t *cvd = vd->vdev_child[c];
2921 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
2928 * Verify that dynamic LUN growth works as expected.
2932 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
2934 spa_t *spa = ztest_spa;
2936 metaslab_class_t *mc;
2937 metaslab_group_t *mg;
2938 size_t psize, newsize;
2940 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
2942 mutex_enter(&ztest_vdev_lock);
2943 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2945 top = ztest_random_vdev_top(spa, B_TRUE);
2947 tvd = spa->spa_root_vdev->vdev_child[top];
2950 old_ms_count = tvd->vdev_ms_count;
2951 old_class_space = metaslab_class_get_space(mc);
2954 * Determine the size of the first leaf vdev associated with
2955 * our top-level device.
2957 vd = vdev_walk_tree(tvd, NULL, NULL);
2958 ASSERT3P(vd, !=, NULL);
2959 ASSERT(vd->vdev_ops->vdev_op_leaf);
2961 psize = vd->vdev_psize;
2964 * We only try to expand the vdev if it's healthy, less than 4x its
2965 * original size, and it has a valid psize.
2967 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
2968 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
2969 spa_config_exit(spa, SCL_STATE, spa);
2970 mutex_exit(&ztest_vdev_lock);
2974 newsize = psize + psize / 8;
2975 ASSERT3U(newsize, >, psize);
2977 if (ztest_opts.zo_verbose >= 6) {
2978 (void) printf("Expanding LUN %s from %lu to %lu\n",
2979 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
2983 * Growing the vdev is a two-step process:
2984 * 1). expand the physical size (i.e. relabel)
2985 * 2). online the vdev to create the new metaslabs
2987 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
2988 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
2989 tvd->vdev_state != VDEV_STATE_HEALTHY) {
2990 if (ztest_opts.zo_verbose >= 5) {
2991 (void) printf("Could not expand LUN because "
2992 "the vdev configuration changed.\n");
2994 spa_config_exit(spa, SCL_STATE, spa);
2995 mutex_exit(&ztest_vdev_lock);
2999 spa_config_exit(spa, SCL_STATE, spa);
3002 * Expanding the LUN will update the config asynchronously,
3003 * thus we must wait for the async thread to complete any
3004 * pending tasks before proceeding.
3008 mutex_enter(&spa->spa_async_lock);
3009 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
3010 mutex_exit(&spa->spa_async_lock);
3013 txg_wait_synced(spa_get_dsl(spa), 0);
3014 (void) poll(NULL, 0, 100);
3017 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3019 tvd = spa->spa_root_vdev->vdev_child[top];
3020 new_ms_count = tvd->vdev_ms_count;
3021 new_class_space = metaslab_class_get_space(mc);
3023 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3024 if (ztest_opts.zo_verbose >= 5) {
3025 (void) printf("Could not verify LUN expansion due to "
3026 "intervening vdev offline or remove.\n");
3028 spa_config_exit(spa, SCL_STATE, spa);
3029 mutex_exit(&ztest_vdev_lock);
3034 * Make sure we were able to grow the vdev.
3036 if (new_ms_count <= old_ms_count)
3037 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3038 new_ms_count, old_ms_count);
3041 * Make sure we were able to grow the pool.
3043 if (new_class_space <= old_class_space)
3044 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
3045 new_class_space, old_class_space);
3047 if (ztest_opts.zo_verbose >= 5) {
3048 char oldnumbuf[6], newnumbuf[6];
3050 nicenum(old_class_space, oldnumbuf);
3051 nicenum(new_class_space, newnumbuf);
3052 (void) printf("%s grew from %s to %s\n",
3053 spa->spa_name, oldnumbuf, newnumbuf);
3056 spa_config_exit(spa, SCL_STATE, spa);
3057 mutex_exit(&ztest_vdev_lock);
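/*
 * A condensed sketch (hypothetical helper, illustrative only) of the
 * two-step expansion performed above. The caller must hold SCL_STATE
 * as reader, since grow_vdev() and online_vdev() assert it.
 */
static int
ztest_expand_tlvdev(vdev_t *tvd, size_t *newsize)
{
	if (vdev_walk_tree(tvd, grow_vdev, newsize) != NULL ||
	    vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
	    tvd->vdev_state != VDEV_STATE_HEALTHY)
		return (-1);	/* config changed under us; caller bails */
	return (0);
}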
3061 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3065 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3068 * Create the objects common to all ztest datasets.
3070 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3071 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3075 ztest_dataset_create(char *dsname)
3077 uint64_t zilset = ztest_random(100);
3078 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3079 ztest_objset_create_cb, NULL);
3081 if (err || zilset < 80)
3084 if (ztest_opts.zo_verbose >= 5)
3085 (void) printf("Setting dataset %s to sync always\n", dsname);
3086 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3087 ZFS_SYNC_ALWAYS, B_FALSE));
3092 ztest_objset_destroy_cb(const char *name, void *arg)
3095 dmu_object_info_t doi;
3099 * Verify that the dataset contains a directory object.
3101 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
3102 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3103 if (error != ENOENT) {
3104 /* We could have crashed in the middle of destroying it */
3105 ASSERT3U(error, ==, 0);
3106 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3107 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3109 dmu_objset_rele(os, FTAG);
3112 * Destroy the dataset.
3114 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
3119 ztest_snapshot_create(char *osname, uint64_t id)
3121 char snapname[MAXNAMELEN];
3124 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3127 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
3128 NULL, NULL, B_FALSE, B_FALSE, -1);
3129 if (error == ENOSPC) {
3130 ztest_record_enospc(FTAG);
3133 if (error != 0 && error != EEXIST)
3134 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3139 ztest_snapshot_destroy(char *osname, uint64_t id)
3141 char snapname[MAXNAMELEN];
3144 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3147 error = dmu_objset_destroy(snapname, B_FALSE);
3148 if (error != 0 && error != ENOENT)
3149 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3155 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3165 zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
3166 name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3168 (void) rw_enter(&ztest_name_lock, RW_READER);
3170 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3171 ztest_opts.zo_pool, (u_longlong_t)id);
3174 * If this dataset exists from a previous run, process its replay log
3175 * half of the time. If we don't replay it, then dmu_objset_destroy()
3176 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3178 if (ztest_random(2) == 0 &&
3179 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3180 ztest_zd_init(zdtmp, NULL, os);
3181 zil_replay(os, zdtmp, ztest_replay_vector);
3182 ztest_zd_fini(zdtmp);
3183 dmu_objset_disown(os, FTAG);
3187 * There may be an old instance of the dataset we're about to
3188 * create lying around from a previous run. If so, destroy it
3189 * and all of its snapshots.
3191 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3192 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3195 * Verify that the destroyed dataset is no longer in the namespace.
3197 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3200 * Verify that we can create a new dataset.
3202 error = ztest_dataset_create(name);
3204 if (error == ENOSPC) {
3205 ztest_record_enospc(FTAG);
3208 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3212 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3214 ztest_zd_init(zdtmp, NULL, os);
3217 * Open the intent log for it.
3219 zilog = zil_open(os, ztest_get_data);
3222 * Put some objects in there, do a little I/O to them,
3223 * and randomly take a couple of snapshots along the way.
3225 iters = ztest_random(5);
3226 for (i = 0; i < iters; i++) {
3227 ztest_dmu_object_alloc_free(zdtmp, id);
3228 if (ztest_random(iters) == 0)
3229 (void) ztest_snapshot_create(name, i);
3233 * Verify that we cannot create an existing dataset.
3235 VERIFY3U(EEXIST, ==,
3236 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3239 * Verify that we can hold an objset that is also owned.
3241 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3242 dmu_objset_rele(os2, FTAG);
3245 * Verify that we cannot own an objset that is already owned.
3248 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3251 dmu_objset_disown(os, FTAG);
3252 ztest_zd_fini(zdtmp);
3254 (void) rw_exit(&ztest_name_lock);
3256 umem_free(name, MAXNAMELEN);
3257 umem_free(zdtmp, sizeof (ztest_ds_t));
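/*
 * The replay-then-discard pattern used above, as a hypothetical
 * helper (illustrative): own the dataset, replay its intent log into
 * a scratch ztest_ds_t, then disown it again.
 */
static void
ztest_replay_dataset(char *name)
{
	ztest_ds_t *zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
	objset_t *os;

	if (dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
		ztest_zd_init(zdtmp, NULL, os);
		zil_replay(os, zdtmp, ztest_replay_vector);
		ztest_zd_fini(zdtmp);
		dmu_objset_disown(os, FTAG);
	}
	umem_free(zdtmp, sizeof (ztest_ds_t));
}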
3261 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3264 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3266 (void) rw_enter(&ztest_name_lock, RW_READER);
3267 (void) ztest_snapshot_destroy(zd->zd_name, id);
3268 (void) ztest_snapshot_create(zd->zd_name, id);
3269 (void) rw_exit(&ztest_name_lock);
3273 * Cleanup non-standard snapshots and clones.
3276 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3285 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3286 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3287 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3288 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3289 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3291 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3292 osname, (u_longlong_t)id);
3293 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3294 osname, (u_longlong_t)id);
3295 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3296 clone1name, (u_longlong_t)id);
3297 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3298 osname, (u_longlong_t)id);
3299 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3300 clone1name, (u_longlong_t)id);
3302 error = dmu_objset_destroy(clone2name, B_FALSE);
3303 if (error && error != ENOENT)
3304 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
3305 error = dmu_objset_destroy(snap3name, B_FALSE);
3306 if (error && error != ENOENT)
3307 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
3308 error = dmu_objset_destroy(snap2name, B_FALSE);
3309 if (error && error != ENOENT)
3310 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
3311 error = dmu_objset_destroy(clone1name, B_FALSE);
3312 if (error && error != ENOENT)
3313 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
3314 error = dmu_objset_destroy(snap1name, B_FALSE);
3315 if (error && error != ENOENT)
3316 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
3318 umem_free(snap1name, MAXNAMELEN);
3319 umem_free(clone1name, MAXNAMELEN);
3320 umem_free(snap2name, MAXNAMELEN);
3321 umem_free(clone2name, MAXNAMELEN);
3322 umem_free(snap3name, MAXNAMELEN);
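/*
 * For reference (added annotation), the layout those names describe,
 * as created by ztest_dsl_dataset_promote_busy() below:
 *
 *	<osname>@s1_<id>		snap1
 *	<osname>/c1_<id>		clone1, cloned from snap1
 *	<osname>/c1_<id>@s2_<id>	snap2
 *	<osname>/c1_<id>@s3_<id>	snap3
 *	<osname>/c2_<id>		clone2, cloned from snap3
 *
 * The destroys above run children-first (clone2, snap3, snap2,
 * clone1, snap1) so that no dataset is still busy when its origin
 * goes away.
 */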
3326 * Verify dsl_dataset_promote handles EBUSY
3329 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3338 char *osname = zd->zd_name;
3341 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3342 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3343 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3344 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3345 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3347 (void) rw_enter(&ztest_name_lock, RW_READER);
3349 ztest_dsl_dataset_cleanup(osname, id);
3351 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3352 osname, (u_longlong_t)id);
3353 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3354 osname, (u_longlong_t)id);
3355 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3356 clone1name, (u_longlong_t)id);
3357 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3358 osname, (u_longlong_t)id);
3359 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3360 clone1name, (u_longlong_t)id);
3362 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
3363 NULL, NULL, B_FALSE, B_FALSE, -1);
3364 if (error && error != EEXIST) {
3365 if (error == ENOSPC) {
3366 ztest_record_enospc(FTAG);
3369 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
3372 error = dmu_objset_hold(snap1name, FTAG, &clone);
3374 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name, error);
3376 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
3377 dmu_objset_rele(clone, FTAG);
3379 if (error == ENOSPC) {
3380 ztest_record_enospc(FTAG);
3383 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
3386 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
3387 NULL, NULL, B_FALSE, B_FALSE, -1);
3388 if (error && error != EEXIST) {
3389 if (error == ENOSPC) {
3390 ztest_record_enospc(FTAG);
3393 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
3396 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
3397 NULL, NULL, B_FALSE, B_FALSE, -1);
3398 if (error && error != EEXIST) {
3399 if (error == ENOSPC) {
3400 ztest_record_enospc(FTAG);
3403 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3406 error = dmu_objset_hold(snap3name, FTAG, &clone);
3408 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3410 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
3411 dmu_objset_rele(clone, FTAG);
3413 if (error == ENOSPC) {
3414 ztest_record_enospc(FTAG);
3417 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
3420 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
3422 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
3423 error = dsl_dataset_promote(clone2name, NULL);
3425 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3427 dsl_dataset_disown(ds, FTAG);
3430 ztest_dsl_dataset_cleanup(osname, id);
3432 (void) rw_exit(&ztest_name_lock);
3434 umem_free(snap1name, MAXNAMELEN);
3435 umem_free(clone1name, MAXNAMELEN);
3436 umem_free(snap2name, MAXNAMELEN);
3437 umem_free(clone2name, MAXNAMELEN);
3438 umem_free(snap3name, MAXNAMELEN);
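/*
 * Why EBUSY is the expected result above (added annotation):
 * promoting clone2 would migrate clone1's snapshots up to clone2's
 * origin (snap3) -- snap2 included -- over to clone2. Since the test
 * still owns snap2 via dsl_dataset_own(), the promotion must fail
 * with EBUSY.
 */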
3441 #undef OD_ARRAY_SIZE
3442 #define OD_ARRAY_SIZE 4
3445 * Verify that dmu_object_{alloc,free} work as expected.
3448 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3455 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3456 od = umem_alloc(size, UMEM_NOFAIL);
3457 batchsize = OD_ARRAY_SIZE;
3459 for (b = 0; b < batchsize; b++)
3460 ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3463 * Destroy the previous batch of objects, create a new batch,
3464 * and do some I/O on the new objects.
3466 if (ztest_object_init(zd, od, size, B_TRUE) != 0)
3469 while (ztest_random(4 * batchsize) != 0)
3470 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3471 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3473 umem_free(od, size);
3476 #undef OD_ARRAY_SIZE
3477 #define OD_ARRAY_SIZE 2
3480 * Verify that dmu_{read,write} work as expected.
3483 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3488 objset_t *os = zd->zd_os;
3489 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3490 od = umem_alloc(size, UMEM_NOFAIL);
3492 int i, freeit, error;
3494 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3495 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3496 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3497 uint64_t regions = 997;
3498 uint64_t stride = 123456789ULL;
3499 uint64_t width = 40;
3500 int free_percent = 5;
3503 * This test uses two objects, packobj and bigobj, that are always
3504 * updated together (i.e. in the same tx) so that their contents are
3505 * in sync and can be compared. Their contents relate to each other
3506 * in a simple way: packobj is a dense array of 'bufwad' structures,
3507 * while bigobj is a sparse array of the same bufwads. Specifically,
3508 * for any index n, there are three bufwads that should be identical:
3510 * packobj, at offset n * sizeof (bufwad_t)
3511 * bigobj, at the head of the nth chunk
3512 * bigobj, at the tail of the nth chunk
3514 * The chunk size is arbitrary. It doesn't have to be a power of two,
3515 * and it doesn't have any relation to the object blocksize.
3516 * The only requirement is that it can hold at least two bufwads.
3518 * Normally, we write the bufwad to each of these locations.
3519 * However, free_percent of the time we instead write zeroes to
3520 * packobj and perform a dmu_free_range() on bigobj. By comparing
3521 * bigobj to packobj, we can verify that the DMU is correctly
3522 * tracking which parts of an object are allocated and free,
3523 * and that the contents of the allocated blocks are correct.
3527 * Read the directory info. If it's the first time, set things up.
3529 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3530 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3532 if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
3533 umem_free(od, size);
3537 bigobj = od[0].od_object;
3538 packobj = od[1].od_object;
3539 chunksize = od[0].od_gen;
3540 ASSERT(chunksize == od[1].od_gen);
3543 * Prefetch a random chunk of the big object.
3544 * Our aim here is to get some async reads in flight
3545 * for blocks that we may free below; the DMU should
3546 * handle this race correctly.
3548 n = ztest_random(regions) * stride + ztest_random(width);
3549 s = 1 + ztest_random(2 * width - 1);
3550 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3553 * Pick a random index and compute the offsets into packobj and bigobj.
3555 n = ztest_random(regions) * stride + ztest_random(width);
3556 s = 1 + ztest_random(width - 1);
3558 packoff = n * sizeof (bufwad_t);
3559 packsize = s * sizeof (bufwad_t);
3561 bigoff = n * chunksize;
3562 bigsize = s * chunksize;
3564 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3565 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3568 * free_percent of the time, free a range of bigobj rather than
3571 freeit = (ztest_random(100) < free_percent);
3574 * Read the current contents of our objects.
3576 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3578 ASSERT3U(error, ==, 0);
3579 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3581 ASSERT3U(error, ==, 0);
3584 * Get a tx for the mods to both packobj and bigobj.
3586 tx = dmu_tx_create(os);
3588 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3591 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3593 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3595 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3597 umem_free(packbuf, packsize);
3598 umem_free(bigbuf, bigsize);
3599 umem_free(od, size);
3603 dmu_object_set_checksum(os, bigobj,
3604 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3606 dmu_object_set_compress(os, bigobj,
3607 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3610 * For each index from n to n + s, verify that the existing bufwad
3611 * in packobj matches the bufwads at the head and tail of the
3612 * corresponding chunk in bigobj. Then update all three bufwads
3613 * with the new values we want to write out.
3615 for (i = 0; i < s; i++) {
3617 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3619 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3621 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3623 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3624 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3626 if (pack->bw_txg > txg)
3627 fatal(0, "future leak: got %llx, open txg is %llx",
3630 if (pack->bw_data != 0 && pack->bw_index != n + i)
3631 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3632 pack->bw_index, n, i);
3634 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3635 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3637 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3638 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3641 bzero(pack, sizeof (bufwad_t));
3643 pack->bw_index = n + i;
3645 pack->bw_data = 1 + ztest_random(-2ULL);
3652 * We've verified all the old bufwads, and made new ones.
3653 * Now write them out.
3655 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3658 if (ztest_opts.zo_verbose >= 7) {
3659 (void) printf("freeing offset %llx size %llx"
3661 (u_longlong_t)bigoff,
3662 (u_longlong_t)bigsize,
3665 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3667 if (ztest_opts.zo_verbose >= 7) {
3668 (void) printf("writing offset %llx size %llx"
3670 (u_longlong_t)bigoff,
3671 (u_longlong_t)bigsize,
3674 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3680 * Sanity check the stuff we just wrote.
3683 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3684 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3686 VERIFY(0 == dmu_read(os, packobj, packoff,
3687 packsize, packcheck, DMU_READ_PREFETCH));
3688 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3689 bigsize, bigcheck, DMU_READ_PREFETCH));
3691 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3692 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3694 umem_free(packcheck, packsize);
3695 umem_free(bigcheck, bigsize);
3698 umem_free(packbuf, packsize);
3699 umem_free(bigbuf, bigsize);
3700 umem_free(od, size);
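/*
 * Hypothetical helper (illustrative only): a direct spot-check of the
 * packobj/bigobj invariant described at the top of
 * ztest_dmu_read_write() for a single index n.
 */
static void
ztest_verify_bufwad_triple(objset_t *os, uint64_t packobj, uint64_t bigobj,
    uint64_t n, uint64_t chunksize)
{
	bufwad_t pack, head, tail;

	/* The dense copy, and the head and tail of bigobj chunk n. */
	VERIFY(dmu_read(os, packobj, n * sizeof (bufwad_t),
	    sizeof (bufwad_t), &pack, DMU_READ_PREFETCH) == 0);
	VERIFY(dmu_read(os, bigobj, n * chunksize,
	    sizeof (bufwad_t), &head, DMU_READ_PREFETCH) == 0);
	VERIFY(dmu_read(os, bigobj, (n + 1) * chunksize - sizeof (bufwad_t),
	    sizeof (bufwad_t), &tail, DMU_READ_PREFETCH) == 0);

	/* All three bufwads must be identical. */
	VERIFY(bcmp(&pack, &head, sizeof (bufwad_t)) == 0);
	VERIFY(bcmp(&pack, &tail, sizeof (bufwad_t)) == 0);
}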
3704 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3705 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3713 * For each index from n to n + s, verify that the existing bufwad
3714 * in packobj matches the bufwads at the head and tail of the
3715 * corresponding chunk in bigobj. Then update all three bufwads
3716 * with the new values we want to write out.
3718 for (i = 0; i < s; i++) {
3720 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3722 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3724 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3726 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3727 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3729 if (pack->bw_txg > txg)
3730 fatal(0, "future leak: got %llx, open txg is %llx",
3733 if (pack->bw_data != 0 && pack->bw_index != n + i)
3734 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3735 pack->bw_index, n, i);
3737 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3738 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3740 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3741 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3743 pack->bw_index = n + i;
3745 pack->bw_data = 1 + ztest_random(-2ULL);
3752 #undef OD_ARRAY_SIZE
3753 #define OD_ARRAY_SIZE 2
3756 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3758 objset_t *os = zd->zd_os;
3765 bufwad_t *packbuf, *bigbuf;
3766 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3767 uint64_t blocksize = ztest_random_blocksize();
3768 uint64_t chunksize = blocksize;
3769 uint64_t regions = 997;
3770 uint64_t stride = 123456789ULL;
3772 dmu_buf_t *bonus_db;
3773 arc_buf_t **bigbuf_arcbufs;
3774 dmu_object_info_t doi;
3776 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3777 od = umem_alloc(size, UMEM_NOFAIL);
3780 * This test uses two objects, packobj and bigobj, that are always
3781 * updated together (i.e. in the same tx) so that their contents are
3782 * in sync and can be compared. Their contents relate to each other
3783 * in a simple way: packobj is a dense array of 'bufwad' structures,
3784 * while bigobj is a sparse array of the same bufwads. Specifically,
3785 * for any index n, there are three bufwads that should be identical:
3787 * packobj, at offset n * sizeof (bufwad_t)
3788 * bigobj, at the head of the nth chunk
3789 * bigobj, at the tail of the nth chunk
3791 * The chunk size is set equal to bigobj block size so that
3792 * dmu_assign_arcbuf() can be tested for object updates.
3796 * Read the directory info. If it's the first time, set things up.
3798 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3799 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3802 if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
3803 umem_free(od, size);
3807 bigobj = od[0].od_object;
3808 packobj = od[1].od_object;
3809 blocksize = od[0].od_blocksize;
3810 chunksize = blocksize;
3811 ASSERT(chunksize == od[1].od_gen);
3813 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3814 VERIFY(ISP2(doi.doi_data_block_size));
3815 VERIFY(chunksize == doi.doi_data_block_size);
3816 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3819 * Pick a random index and compute the offsets into packobj and bigobj.
3821 n = ztest_random(regions) * stride + ztest_random(width);
3822 s = 1 + ztest_random(width - 1);
3824 packoff = n * sizeof (bufwad_t);
3825 packsize = s * sizeof (bufwad_t);
3827 bigoff = n * chunksize;
3828 bigsize = s * chunksize;
3830 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3831 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3833 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3835 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3838 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3839 * Iteration 1 tests zcopy to already referenced dbufs.
3840 * Iteration 2 tests zcopy to a dirty dbuf in the same txg.
3841 * Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
3842 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3843 * Iteration 5 tests zcopy when it can't be done.
3844 * Iteration 6 one more zcopy write.
3846 for (i = 0; i < 7; i++) {
3851 * In iteration 5 (i == 5) use arcbufs
3852 * that don't match bigobj blksz to test
3853 * dmu_assign_arcbuf() when it can't directly
3854 * assign an arcbuf to a dbuf.
3856 for (j = 0; j < s; j++) {
3859 dmu_request_arcbuf(bonus_db, chunksize);
3861 bigbuf_arcbufs[2 * j] =
3862 dmu_request_arcbuf(bonus_db, chunksize / 2);
3863 bigbuf_arcbufs[2 * j + 1] =
3864 dmu_request_arcbuf(bonus_db, chunksize / 2);
3869 * Get a tx for the mods to both packobj and bigobj.
3871 tx = dmu_tx_create(os);
3873 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3874 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3876 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3878 umem_free(packbuf, packsize);
3879 umem_free(bigbuf, bigsize);
3880 for (j = 0; j < s; j++) {
3882 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3885 bigbuf_arcbufs[2 * j]);
3887 bigbuf_arcbufs[2 * j + 1]);
3890 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3891 umem_free(od, size);
3892 dmu_buf_rele(bonus_db, FTAG);
3897 * 50% of the time don't read objects in the 1st iteration to
3898 * test dmu_assign_arcbuf() for the case when there are no
3899 * existing dbufs for the specified offsets.
3901 if (i != 0 || ztest_random(2) != 0) {
3902 error = dmu_read(os, packobj, packoff,
3903 packsize, packbuf, DMU_READ_PREFETCH);
3904 ASSERT3U(error, ==, 0);
3905 error = dmu_read(os, bigobj, bigoff, bigsize,
3906 bigbuf, DMU_READ_PREFETCH);
3907 ASSERT3U(error, ==, 0);
3909 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3913 * We've verified all the old bufwads, and made new ones.
3914 * Now write them out.
3916 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3917 if (ztest_opts.zo_verbose >= 7) {
3918 (void) printf("writing offset %llx size %llx"
3920 (u_longlong_t)bigoff,
3921 (u_longlong_t)bigsize,
3924 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3927 bcopy((caddr_t)bigbuf + (off - bigoff),
3928 bigbuf_arcbufs[j]->b_data, chunksize);
3930 bcopy((caddr_t)bigbuf + (off - bigoff),
3931 bigbuf_arcbufs[2 * j]->b_data,
3933 bcopy((caddr_t)bigbuf + (off - bigoff) +
3935 bigbuf_arcbufs[2 * j + 1]->b_data,
3940 VERIFY(dmu_buf_hold(os, bigobj, off,
3941 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3944 dmu_assign_arcbuf(bonus_db, off,
3945 bigbuf_arcbufs[j], tx);
3947 dmu_assign_arcbuf(bonus_db, off,
3948 bigbuf_arcbufs[2 * j], tx);
3949 dmu_assign_arcbuf(bonus_db,
3950 off + chunksize / 2,
3951 bigbuf_arcbufs[2 * j + 1], tx);
3954 dmu_buf_rele(dbt, FTAG);
3960 * Sanity check the stuff we just wrote.
3963 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3964 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3966 VERIFY(0 == dmu_read(os, packobj, packoff,
3967 packsize, packcheck, DMU_READ_PREFETCH));
3968 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3969 bigsize, bigcheck, DMU_READ_PREFETCH));
3971 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3972 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3974 umem_free(packcheck, packsize);
3975 umem_free(bigcheck, bigsize);
3978 txg_wait_open(dmu_objset_pool(os), 0);
3979 } else if (i == 3) {
3980 txg_wait_synced(dmu_objset_pool(os), 0);
3984 dmu_buf_rele(bonus_db, FTAG);
3985 umem_free(packbuf, packsize);
3986 umem_free(bigbuf, bigsize);
3987 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3988 umem_free(od, size);
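/*
 * The minimal zcopy write pattern exercised above, as a hypothetical
 * helper (illustrative): request an arc buffer sized to the block,
 * fill it, and assign it at a block-aligned offset.
 * dmu_assign_arcbuf() consumes the buffer, so no dmu_return_arcbuf()
 * is needed on this path.
 */
static void
ztest_zcopy_write_block(dmu_buf_t *bonus_db, uint64_t off,
    uint64_t blocksize, const void *src, dmu_tx_t *tx)
{
	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, blocksize);

	bcopy(src, abuf->b_data, blocksize);
	dmu_assign_arcbuf(bonus_db, off, abuf, tx);
}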
3993 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3997 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
3998 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3999 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4002 * Have multiple threads write to large offsets in an object
4003 * to verify that parallel writes to an object -- even to the
4004 * same blocks within the object -- don't cause any trouble.
4006 ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4008 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
4011 while (ztest_random(10) != 0)
4012 ztest_io(zd, od->od_object, offset);
4014 umem_free(od, sizeof(ztest_od_t));
4018 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4021 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4022 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4023 uint64_t count = ztest_random(20) + 1;
4024 uint64_t blocksize = ztest_random_blocksize();
4027 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4029 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4031 if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
4032 umem_free(od, sizeof(ztest_od_t));
4036 if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
4037 umem_free(od, sizeof(ztest_od_t));
4041 ztest_prealloc(zd, od->od_object, offset, count * blocksize);
4043 data = umem_zalloc(blocksize, UMEM_NOFAIL);
4045 while (ztest_random(count) != 0) {
4046 uint64_t randoff = offset + (ztest_random(count) * blocksize);
4047 if (ztest_write(zd, od->od_object, randoff, blocksize,
4050 while (ztest_random(4) != 0)
4051 ztest_io(zd, od->od_object, randoff);
4054 umem_free(data, blocksize);
4055 umem_free(od, sizeof(ztest_od_t));
4059 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4061 #define ZTEST_ZAP_MIN_INTS 1
4062 #define ZTEST_ZAP_MAX_INTS 4
4063 #define ZTEST_ZAP_MAX_PROPS 1000
4066 ztest_zap(ztest_ds_t *zd, uint64_t id)
4068 objset_t *os = zd->zd_os;
4071 uint64_t txg, last_txg;
4072 uint64_t value[ZTEST_ZAP_MAX_INTS];
4073 uint64_t zl_ints, zl_intsize, prop;
4076 char propname[100], txgname[100];
4078 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4080 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4081 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4083 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
4084 !ztest_random(2)) != 0)
4087 object = od->od_object;
4090 * Generate a known hash collision, and verify that
4091 * we can lookup and remove both entries.
4093 tx = dmu_tx_create(os);
4094 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4095 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4098 for (i = 0; i < 2; i++) {
4100 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4103 for (i = 0; i < 2; i++) {
4104 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4105 sizeof (uint64_t), 1, &value[i], tx));
4107 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4108 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4109 ASSERT3U(zl_ints, ==, 1);
4111 for (i = 0; i < 2; i++) {
4112 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4117 * Generate a bunch of random entries.
4119 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4121 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4122 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4123 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4124 bzero(value, sizeof (value));
4128 * If these zap entries already exist, validate their contents.
4130 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4132 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4133 ASSERT3U(zl_ints, ==, 1);
4135 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
4136 zl_ints, &last_txg) == 0);
4138 VERIFY(zap_length(os, object, propname, &zl_intsize,
4141 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4142 ASSERT3U(zl_ints, ==, ints);
4144 VERIFY(zap_lookup(os, object, propname, zl_intsize,
4145 zl_ints, value) == 0);
4147 for (i = 0; i < ints; i++) {
4148 ASSERT3U(value[i], ==, last_txg + object + i);
4151 ASSERT3U(error, ==, ENOENT);
4155 * Atomically update two entries in our zap object.
4156 * The first is named txg_%llu, and contains the txg
4157 * in which the property was last updated. The second
4158 * is named prop_%llu, and the nth element of its value
4159 * should be txg + object + n.
4161 tx = dmu_tx_create(os);
4162 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4163 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4168 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4170 for (i = 0; i < ints; i++)
4171 value[i] = txg + object + i;
4173 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4175 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4181 * Remove a random pair of entries.
4183 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4184 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4185 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4187 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4189 if (error == ENOENT)
4192 ASSERT3U(error, ==, 0);
4194 tx = dmu_tx_create(os);
4195 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4196 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4199 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4200 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4203 umem_free(od, sizeof(ztest_od_t));
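/*
 * The paired-entry update above, condensed into a hypothetical helper
 * (illustrative): because both entries change in a single tx, readers
 * always see a txg_%llu value consistent with prop_%llu's contents.
 */
static void
ztest_zap_update_pair(objset_t *os, uint64_t object, const char *txgname,
    const char *propname, uint64_t txg, uint64_t *value, int ints,
    dmu_tx_t *tx)
{
	VERIFY3U(0, ==, zap_update(os, object, txgname,
	    sizeof (uint64_t), 1, &txg, tx));
	VERIFY3U(0, ==, zap_update(os, object, propname,
	    sizeof (uint64_t), ints, value, tx));
}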
4207 * Test the upgrading of a microzap to a fatzap.
4210 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4212 objset_t *os = zd->zd_os;
4214 uint64_t object, txg;
4217 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4218 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4220 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
4221 !ztest_random(2)) != 0)
4223 object = od->od_object;
4226 * Add entries to this ZAP and make sure it spills over
4227 * and gets upgraded to a fatzap. Also, since we are adding
4228 * 2050 entries, we should see ptrtbl growth and leaf-block splits.
4230 for (i = 0; i < 2050; i++) {
4231 char name[MAXNAMELEN];
4236 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4237 (u_longlong_t)id, (u_longlong_t)value);
4239 tx = dmu_tx_create(os);
4240 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4241 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4244 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4246 ASSERT(error == 0 || error == EEXIST);
4250 umem_free(od, sizeof(ztest_od_t));
4255 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4257 objset_t *os = zd->zd_os;
4259 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4261 int i, namelen, error;
4262 int micro = ztest_random(2);
4263 char name[20], string_value[20];
4266 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4267 ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4269 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4270 umem_free(od, sizeof(ztest_od_t));
4274 object = od->od_object;
4277 * Generate a random name of the form 'xxx.....' where each
4278 * x is a random printable character and the dots are dots.
4279 * There are 94 such characters, and the name length goes from
4280 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4282 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4284 for (i = 0; i < 3; i++)
4285 name[i] = '!' + ztest_random('~' - '!' + 1);
4286 for (; i < namelen - 1; i++)
4290 if ((namelen & 1) || micro) {
4291 wsize = sizeof (txg);
4297 data = string_value;
4301 VERIFY(zap_count(os, object, &count) == 0);
4302 ASSERT(count != -1ULL);
4305 * Select an operation: length, lookup, add, update, remove.
4307 i = ztest_random(5);
4310 tx = dmu_tx_create(os);
4311 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4312 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4315 bcopy(name, string_value, namelen);
4319 bzero(string_value, namelen);
4325 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4327 ASSERT3U(wsize, ==, zl_wsize);
4328 ASSERT3U(wc, ==, zl_wc);
4330 ASSERT3U(error, ==, ENOENT);
4335 error = zap_lookup(os, object, name, wsize, wc, data);
4337 if (data == string_value &&
4338 bcmp(name, data, namelen) != 0)
4339 fatal(0, "name '%s' != val '%s' len %d",
4340 name, data, namelen);
4342 ASSERT3U(error, ==, ENOENT);
4347 error = zap_add(os, object, name, wsize, wc, data, tx);
4348 ASSERT(error == 0 || error == EEXIST);
4352 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4356 error = zap_remove(os, object, name, tx);
4357 ASSERT(error == 0 || error == ENOENT);
4364 umem_free(od, sizeof(ztest_od_t));
4368 * Commit callback data.
4370 typedef struct ztest_cb_data {
4371 list_node_t zcd_node;
4373 int zcd_expected_err;
4374 boolean_t zcd_added;
4375 boolean_t zcd_called;
4379 /* This is the actual commit callback function */
4381 ztest_commit_callback(void *arg, int error)
4383 ztest_cb_data_t *data = arg;
4384 uint64_t synced_txg;
4386 VERIFY(data != NULL);
4387 VERIFY3S(data->zcd_expected_err, ==, error);
4388 VERIFY(!data->zcd_called);
4390 synced_txg = spa_last_synced_txg(data->zcd_spa);
4391 if (data->zcd_txg > synced_txg)
4392 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4393 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4396 data->zcd_called = B_TRUE;
4398 if (error == ECANCELED) {
4399 ASSERT3U(data->zcd_txg, ==, 0);
4400 ASSERT(!data->zcd_added);
4403 * The private callback data should be destroyed here, but
4404 * since we are going to check the zcd_called field after
4405 * dmu_tx_abort(), we will destroy it there.
4410 ASSERT(data->zcd_added);
4411 ASSERT3U(data->zcd_txg, !=, 0);
4413 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4415 /* Track the smallest txg delay any callback has seen so far */
4416 if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
4417 zc_min_txg_delay = synced_txg - data->zcd_txg;
4419 /* Remove our callback from the list */
4420 list_remove(&zcl.zcl_callbacks, data);
4422 (void) mutex_exit(&zcl.zcl_callbacks_lock);
4424 umem_free(data, sizeof (ztest_cb_data_t));
4427 /* Allocate and initialize callback data structure */
4428 static ztest_cb_data_t *
4429 ztest_create_cb_data(objset_t *os, uint64_t txg)
4431 ztest_cb_data_t *cb_data;
4433 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4435 cb_data->zcd_txg = txg;
4436 cb_data->zcd_spa = dmu_objset_spa(os);
4437 list_link_init(&cb_data->zcd_node);
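/*
 * Typical registration pattern (hypothetical helper, illustrative):
 * a callback registered on a tx fires with ECANCELED if the tx is
 * aborted, or with 0 once the tx's txg has been synced.
 */
static ztest_cb_data_t *
ztest_register_cb(objset_t *os, dmu_tx_t *tx, uint64_t txg)
{
	ztest_cb_data_t *cb_data = ztest_create_cb_data(os, txg);

	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data);
	return (cb_data);
}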
4443 * Commit callback test.
4446 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4448 objset_t *os = zd->zd_os;
4451 ztest_cb_data_t *cb_data[3], *tmp_cb;
4452 uint64_t old_txg, txg;
4455 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4456 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4458 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4459 umem_free(od, sizeof(ztest_od_t));
4463 tx = dmu_tx_create(os);
4465 cb_data[0] = ztest_create_cb_data(os, 0);
4466 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4468 dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
4470 /* Every once in a while, abort the transaction on purpose */
4471 if (ztest_random(100) == 0)
4475 error = dmu_tx_assign(tx, TXG_NOWAIT);
4477 txg = error ? 0 : dmu_tx_get_txg(tx);
4479 cb_data[0]->zcd_txg = txg;
4480 cb_data[1] = ztest_create_cb_data(os, txg);
4481 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4485 * It's not a strict requirement to call the registered
4486 * callbacks from inside dmu_tx_abort(), but that is what
4487 * happens in the current implementation, so we check
4488 * for that.
4490 for (i = 0; i < 2; i++) {
4491 cb_data[i]->zcd_expected_err = ECANCELED;
4492 VERIFY(!cb_data[i]->zcd_called);
4497 for (i = 0; i < 2; i++) {
4498 VERIFY(cb_data[i]->zcd_called);
4499 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4502 umem_free(od, sizeof(ztest_od_t));
4506 cb_data[2] = ztest_create_cb_data(os, txg);
4507 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4510 * Read existing data to make sure there isn't a future leak.
4512 VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
4513 &old_txg, DMU_READ_PREFETCH));
4516 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4519 dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
4521 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4524 * Since commit callbacks don't have any ordering requirement and since
4525 * it is theoretically possible for a commit callback to be called
4526 * after an arbitrary amount of time has elapsed since its txg has been
4527 * synced, it is difficult to reliably determine whether a commit
4528 * callback hasn't been called due to high load or due to a flawed
4531 * In practice, we will assume that if after a certain number of txgs a
4532 * commit callback hasn't been called, then most likely there's an
4533 * implementation bug.
4535 tmp_cb = list_head(&zcl.zcl_callbacks);
4536 if (tmp_cb != NULL &&
4537 tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
4538 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4539 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4543 * Let's find the place to insert our callbacks.
4545 * Even though the list is ordered by txg, it is possible for the
4546 * insertion point to not be the end because our txg may already be
4547 * quiescing at this point and other callbacks in the open txg
4548 * (from other objsets) may have sneaked in.
4550 tmp_cb = list_tail(&zcl.zcl_callbacks);
4551 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4552 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4554 /* Add the 3 callbacks to the list */
4555 for (i = 0; i < 3; i++) {
4557 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4559 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4562 cb_data[i]->zcd_added = B_TRUE;
4563 VERIFY(!cb_data[i]->zcd_called);
4565 tmp_cb = cb_data[i];
4570 (void) mutex_exit(&zcl.zcl_callbacks_lock);
4574 umem_free(od, sizeof(ztest_od_t));
4579 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4581 zfs_prop_t proplist[] = {
4583 ZFS_PROP_COMPRESSION,
4589 (void) rw_enter(&ztest_name_lock, RW_READER);
4591 for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4592 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4593 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4595 (void) rw_exit(&ztest_name_lock);
4600 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4602 nvlist_t *props = NULL;
4604 (void) rw_enter(&ztest_name_lock, RW_READER);
4606 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4607 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4609 VERIFY3U(spa_prop_get(ztest_spa, &props), ==, 0);
4611 if (ztest_opts.zo_verbose >= 6)
4612 dump_nvlist(props, 4);
4616 (void) rw_exit(&ztest_name_lock);
4620 * Test snapshot hold/release and deferred destroy.
4623 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4626 objset_t *os = zd->zd_os;
4630 char clonename[100];
4632 char osname[MAXNAMELEN];
4634 (void) rw_enter(&ztest_name_lock, RW_READER);
4636 dmu_objset_name(os, osname);
4638 (void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id);
4639 (void) snprintf(fullname, 100, "%s@%s", osname, snapname);
4640 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, (u_longlong_t)id);
4641 (void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id);
4644 * Clean up from any previous run.
4646 (void) dmu_objset_destroy(clonename, B_FALSE);
4647 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4648 (void) dmu_objset_destroy(fullname, B_FALSE);
4651 * Create snapshot, clone it, mark snap for deferred destroy,
4652 * destroy clone, verify snap was also destroyed.
4654 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4657 if (error == ENOSPC) {
4658 ztest_record_enospc("dmu_objset_snapshot");
4661 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4664 error = dmu_objset_hold(fullname, FTAG, &origin);
4666 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4668 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
4669 dmu_objset_rele(origin, FTAG);
4671 if (error == ENOSPC) {
4672 ztest_record_enospc("dmu_objset_clone");
4675 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4678 error = dmu_objset_destroy(fullname, B_TRUE);
4680 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4684 error = dmu_objset_destroy(clonename, B_FALSE);
4686 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
4688 error = dmu_objset_hold(fullname, FTAG, &origin);
4689 if (error != ENOENT)
4690 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4693 * Create snapshot, add temporary hold, verify that we can't
4694 * destroy a held snapshot, mark for deferred destroy,
4695 * release hold, verify snapshot was destroyed.
4697 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4700 if (error == ENOSPC) {
4701 ztest_record_enospc("dmu_objset_snapshot");
4704 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4707 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
4710 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
4712 error = dmu_objset_destroy(fullname, B_FALSE);
4713 if (error != EBUSY) {
4714 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
4718 error = dmu_objset_destroy(fullname, B_TRUE);
4720 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4724 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4726 fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);
4728 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
4731 (void) rw_exit(&ztest_name_lock);
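/*
 * The hold semantics exercised above, condensed (illustrative sketch;
 * assumes 'fullname' already carries a user hold tagged 'tag'):
 */
static void
ztest_held_snapshot_destroy(char *osname, char *snapname, char *fullname,
    char *tag)
{
	objset_t *os;

	/* A plain destroy must fail while the hold exists ... */
	VERIFY3U(EBUSY, ==, dmu_objset_destroy(fullname, B_FALSE));

	/* ... but marking it for deferred destroy succeeds ... */
	VERIFY3U(0, ==, dmu_objset_destroy(fullname, B_TRUE));

	/* ... and releasing the last hold finally destroys it. */
	VERIFY3U(0, ==,
	    dsl_dataset_user_release(osname, snapname, tag, B_FALSE));
	VERIFY(dmu_objset_hold(fullname, FTAG, &os) == ENOENT);
}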
4735 * Inject random faults into the on-disk data.
4739 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4741 ztest_shared_t *zs = ztest_shared;
4742 spa_t *spa = ztest_spa;
4746 uint64_t bad = 0x1990c0ffeedecadeull;
4751 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4757 boolean_t islog = B_FALSE;
4759 path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4760 pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4762 mutex_enter(&ztest_vdev_lock);
4763 maxfaults = MAXFAULTS();
4764 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4765 mirror_save = zs->zs_mirrors;
4766 mutex_exit(&ztest_vdev_lock);
4768 ASSERT(leaves >= 1);
4771 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4773 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4775 if (ztest_random(2) == 0) {
4777 * Inject errors on a normal data device or slog device.
4779 top = ztest_random_vdev_top(spa, B_TRUE);
4780 leaf = ztest_random(leaves) + zs->zs_splits;
4783 * Generate paths to the first leaf in this top-level vdev,
4784 * and to the random leaf we selected. We'll induce transient
4785 * write failures and random online/offline activity on leaf 0,
4786 * and we'll write random garbage to the randomly chosen leaf.
4788 (void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
4789 ztest_opts.zo_dir, ztest_opts.zo_pool,
4790 top * leaves + zs->zs_splits);
4791 (void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
4792 ztest_opts.zo_dir, ztest_opts.zo_pool,
4793 top * leaves + leaf);
4795 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4796 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4799 if (vd0 != NULL && maxfaults != 1) {
4801 * Make vd0 explicitly claim to be unreadable,
4802 * or unwriteable, or reach behind its back
4803 * and close the underlying fd. We can do this if
4804 * maxfaults == 0 because we'll fail and reexecute,
4805 * and we can do it if maxfaults >= 2 because we'll
4806 * have enough redundancy. If maxfaults == 1, the
4807 * combination of this with injection of random data
4808 * corruption below exceeds the pool's fault tolerance.
4810 vdev_file_t *vf = vd0->vdev_tsd;
4812 if (vf != NULL && ztest_random(3) == 0) {
4813 (void) close(vf->vf_vnode->v_fd);
4814 vf->vf_vnode->v_fd = -1;
4815 } else if (ztest_random(2) == 0) {
4816 vd0->vdev_cant_read = B_TRUE;
4818 vd0->vdev_cant_write = B_TRUE;
4820 guid0 = vd0->vdev_guid;
4824 * Inject errors on an l2cache device.
4826 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4828 if (sav->sav_count == 0) {
4829 spa_config_exit(spa, SCL_STATE, FTAG);
4832 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4833 guid0 = vd0->vdev_guid;
4834 (void) strcpy(path0, vd0->vdev_path);
4835 (void) strcpy(pathrand, vd0->vdev_path);
4839 maxfaults = INT_MAX; /* no limit on cache devices */
4842 spa_config_exit(spa, SCL_STATE, FTAG);
4845 * If we can tolerate two or more faults, or we're dealing
4846 * with a slog, randomly online/offline vd0.
4848 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4849 if (ztest_random(10) < 6) {
4850 int flags = (ztest_random(2) == 0 ?
4851 ZFS_OFFLINE_TEMPORARY : 0);
4854 * We have to grab the ztest_name_lock as writer to
4855 * prevent a race between offlining a slog and
4856 * destroying a dataset. Offlining the slog will
4857 * grab a reference on the dataset which may cause
4858 * dmu_objset_destroy() to fail with EBUSY, thus
4859 * leaving the dataset in an inconsistent state.
4862 (void) rw_enter(&ztest_name_lock,
4865 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4868 (void) rw_exit(&ztest_name_lock);
4870 (void) vdev_online(spa, guid0, 0, NULL);
4878 * We have at least single-fault tolerance, so inject data corruption.
4880 fd = open(pathrand, O_RDWR);
4882 if (fd == -1) /* we hit a gap in the device namespace */
4885 fsize = lseek(fd, 0, SEEK_END);
4887 while (--iters != 0) {
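/*
 * Pick a random 8-byte-aligned offset in the chosen leaf: a random
 * multiple of the (leaves << bshift) stride, plus this leaf's
 * (leaf << bshift) column, plus a random aligned offset within the
 * first half of a 2^bshift window (with SPA_MAXBLOCKSHIFT at 17,
 * bshift is 19, i.e. a 512K window).
 */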
4888 offset = ztest_random(fsize / (leaves << bshift)) *
4889 (leaves << bshift) + (leaf << bshift) +
4890 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4892 if (offset >= fsize)
4895 mutex_enter(&ztest_vdev_lock);
4896 if (mirror_save != zs->zs_mirrors) {
4897 mutex_exit(&ztest_vdev_lock);
4902 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4903 fatal(1, "can't inject bad word at 0x%llx in %s",
4906 mutex_exit(&ztest_vdev_lock);
4908 if (ztest_opts.zo_verbose >= 7)
4909 (void) printf("injected bad word into %s,"
4910 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4915 umem_free(path0, MAXPATHLEN);
4916 umem_free(pathrand, MAXPATHLEN);
4920 * Verify that DDT repair works as expected.
4923 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4925 ztest_shared_t *zs = ztest_shared;
4926 spa_t *spa = ztest_spa;
4927 objset_t *os = zd->zd_os;
4929 uint64_t object, blocksize, txg, pattern, psize;
4930 enum zio_checksum checksum = spa_dedup_checksum(spa);
4935 int copies = 2 * ZIO_DEDUPDITTO_MIN;
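/*
 * ztest_run() sets spa_dedup_ditto to 2 * ZIO_DEDUPDITTO_MIN, so
 * writing this many identical copies should push the DDT refcount
 * to the ditto threshold and leave a duplicate on-disk copy for
 * the repair below to fall back on.
 */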
4938 blocksize = ztest_random_blocksize();
4939 blocksize = MIN(blocksize, 2048); /* because we write so many */
4941 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
4942 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4944 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4945 umem_free(od, sizeof (ztest_od_t));
4950 * Take the name lock as writer to prevent anyone else from changing
4951 * the pool and dataset properties we need to maintain during this test.
4953 (void) rw_enter(&ztest_name_lock, RW_WRITER);
4955 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4957 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4959 (void) rw_exit(&ztest_name_lock);
4960 umem_free(od, sizeof (ztest_od_t));
4964 object = od[0].od_object;
4965 blocksize = od[0].od_blocksize;
4966 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4968 ASSERT(object != 0);
4970 tx = dmu_tx_create(os);
4971 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4972 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4974 (void) rw_exit(&ztest_name_lock);
4975 umem_free(od, sizeof (ztest_od_t));
4980 * Write all the copies of our block.
4982 for (i = 0; i < copies; i++) {
4983 uint64_t offset = i * blocksize;
4984 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
4985 DMU_READ_NO_PREFETCH) == 0);
4986 ASSERT(db->db_offset == offset);
4987 ASSERT(db->db_size == blocksize);
4988 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4989 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4990 dmu_buf_will_fill(db, tx);
4991 ztest_pattern_set(db->db_data, db->db_size, pattern);
4992 dmu_buf_rele(db, FTAG);
4996 txg_wait_synced(spa_get_dsl(spa), txg);
4999 * Find out what block we got.
5001 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
5002 DMU_READ_NO_PREFETCH) == 0);
5003 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
5004 dmu_buf_rele(db, FTAG);
5007 * Damage the block. Dedup-ditto will save us when we read it later.
5009 psize = BP_GET_PSIZE(&blk);
5010 buf = zio_buf_alloc(psize);
5011 ztest_pattern_set(buf, psize, ~pattern);
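/*
 * Rewrite the block in place with the inverted pattern;
 * ZIO_FLAG_INDUCE_DAMAGE writes the buffer without updating the
 * block pointer, so the on-disk data no longer matches its checksum.
 */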
5013 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5014 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5015 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
5017 zio_buf_free(buf, psize);
5019 (void) rw_exit(&ztest_name_lock);
5020 umem_free(od, sizeof (ztest_od_t));
5028 ztest_scrub(ztest_ds_t *zd, uint64_t id)
5030 spa_t *spa = ztest_spa;
5032 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5033 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
5034 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5038 * Change the guid for the pool.
5042 ztest_reguid(ztest_ds_t *zd, uint64_t id)
5044 spa_t *spa = ztest_spa;
5045 uint64_t orig, load;
5048 orig = spa_guid(spa);
5049 load = spa_load_guid(spa);
5051 (void) rw_enter(&ztest_name_lock, RW_WRITER);
5052 error = spa_change_guid(spa);
5053 (void) rw_exit(&ztest_name_lock);
5058 if (ztest_opts.zo_verbose >= 3) {
5059 (void) printf("Changed guid old %llu -> %llu\n",
5060 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5063 VERIFY3U(orig, !=, spa_guid(spa));
5064 VERIFY3U(load, ==, spa_load_guid(spa));
5068 * Rename the pool to a different name and then rename it back.
5072 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5074 char *oldname, *newname;
5077 (void) rw_enter(&ztest_name_lock, RW_WRITER);
5079 oldname = ztest_opts.zo_pool;
5080 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5081 (void) strcpy(newname, oldname);
5082 (void) strcat(newname, "_tmp");
5087 VERIFY3U(0, ==, spa_rename(oldname, newname));
5090 * Try to open it under the old name, which shouldn't exist
5092 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5095 * Open it under the new name and make sure it's still the same spa_t.
5097 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5099 ASSERT(spa == ztest_spa);
5100 spa_close(spa, FTAG);
5103 * Rename it back to the original
5105 VERIFY3U(0, ==, spa_rename(newname, oldname));
5108 * Make sure it can still be opened
5110 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5112 ASSERT(spa == ztest_spa);
5113 spa_close(spa, FTAG);
5115 umem_free(newname, strlen(newname) + 1);
5117 (void) rw_exit(&ztest_name_lock);
5121 * Verify pool integrity by running zdb.
5124 ztest_run_zdb(char *pool)
5132 bin = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
5133 zdb = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
5134 zbuf = umem_alloc(1024, UMEM_NOFAIL);
5136 VERIFY(realpath(getexecname(), bin) != NULL);
5137 if (strncmp(bin, "/usr/sbin/ztest", 15) == 0) {
5138 strcpy(bin, "/usr/sbin/zdb"); /* Installed */
5139 } else if (strncmp(bin, "/sbin/ztest", 11) == 0) {
5140 strcpy(bin, "/sbin/zdb"); /* Installed */
5142 strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
5143 strcat(bin, "/zdb/zdb");
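/*
 * The assembled command looks roughly like
 *   /usr/sbin/zdb -bcc -U /tmp/zpool.cache ztest
 * where -bcc traverses all blocks and verifies every checksum.
 */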
5147 "%s -bcc%s%s -U %s %s",
5149 ztest_opts.zo_verbose >= 3 ? "s" : "",
5150 ztest_opts.zo_verbose >= 4 ? "v" : "",
5154 if (ztest_opts.zo_verbose >= 5)
5155 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
5157 fp = popen(zdb, "r");
5159 while (fgets(zbuf, 1024, fp) != NULL)
5160 if (ztest_opts.zo_verbose >= 3)
5161 (void) printf("%s", zbuf);
5163 status = pclose(fp);
5168 ztest_dump_core = 0;
5169 if (WIFEXITED(status))
5170 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
5172 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
5174 umem_free(bin, MAXPATHLEN + MAXNAMELEN + 20);
5175 umem_free(zdb, MAXPATHLEN + MAXNAMELEN + 20);
5176 umem_free(zbuf, 1024);
5180 ztest_walk_pool_directory(char *header)
5184 if (ztest_opts.zo_verbose >= 6)
5185 (void) printf("%s\n", header);
5187 mutex_enter(&spa_namespace_lock);
5188 while ((spa = spa_next(spa)) != NULL)
5189 if (ztest_opts.zo_verbose >= 6)
5190 (void) printf("\t%s\n", spa_name(spa));
5191 mutex_exit(&spa_namespace_lock);
5195 ztest_spa_import_export(char *oldname, char *newname)
5197 nvlist_t *config, *newconfig;
5201 if (ztest_opts.zo_verbose >= 4) {
5202 (void) printf("import/export: old = %s, new = %s\n",
5207 * Clean up from previous runs.
5209 (void) spa_destroy(newname);
5212 * Get the pool's configuration and guid.
5214 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5217 * Kick off a scrub to tickle scrub/export races.
5219 if (ztest_random(2) == 0)
5220 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5222 pool_guid = spa_guid(spa);
5223 spa_close(spa, FTAG);
5225 ztest_walk_pool_directory("pools before export");
5230 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5232 ztest_walk_pool_directory("pools after export");
5237 newconfig = spa_tryimport(config);
5238 ASSERT(newconfig != NULL);
5239 nvlist_free(newconfig);
5242 * Import it under the new name.
5244 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
5246 ztest_walk_pool_directory("pools after import");
5249 * Try to import it again -- should fail with EEXIST.
5251 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5254 * Try to import it under a different name -- should fail with EEXIST.
5256 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5259 * Verify that the pool is no longer visible under the old name.
5261 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5264 * Verify that we can open and close the pool using the new name.
5266 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5267 ASSERT(pool_guid == spa_guid(spa));
5268 spa_close(spa, FTAG);
5270 nvlist_free(config);
5274 ztest_resume(spa_t *spa)
5276 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5277 (void) printf("resuming from suspended state\n");
5278 spa_vdev_state_enter(spa, SCL_NONE);
5279 vdev_clear(spa, NULL);
5280 (void) spa_vdev_state_exit(spa, NULL, 0);
5281 (void) zio_resume(spa);
5285 ztest_resume_thread(void *arg)
5289 while (!ztest_exiting) {
5290 if (spa_suspended(spa))
5292 (void) poll(NULL, 0, 100);
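/*
 * The SIGALRM armed in ztest_run() fires only if the run has
 * overstayed its expected stop time by GRACE seconds, so treat
 * it as a hang and bail out.
 */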
5304 ztest_deadman_alarm(int sig)
5306 fatal(0, "failed to complete within %d seconds of deadline", GRACE);
5311 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5313 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5314 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5315 hrtime_t functime = gethrtime();
5318 for (i = 0; i < zi->zi_iters; i++)
5319 zi->zi_func(zd, id);
5321 functime = gethrtime() - functime;
5323 atomic_add_64(&zc->zc_count, 1);
5324 atomic_add_64(&zc->zc_time, functime);
5326 if (ztest_opts.zo_verbose >= 4) {
5328 (void) dladdr((void *)zi->zi_func, &dli);
5329 (void) printf("%6.2f sec in %s\n",
5330 (double)functime / NANOSEC, dli.dli_sname);
5335 ztest_thread(void *arg)
5338 uint64_t id = (uintptr_t)arg;
5339 ztest_shared_t *zs = ztest_shared;
5343 ztest_shared_callstate_t *zc;
5345 while ((now = gethrtime()) < zs->zs_thread_stop) {
5347 * See if it's time to force a crash.
5349 if (now > zs->zs_thread_kill)
5353 * If we're getting ENOSPC with some regularity, stop.
5355 if (zs->zs_enospc_count > 10)
5359 * Pick a random function to execute.
5361 rand = ztest_random(ZTEST_FUNCS);
5362 zi = &ztest_info[rand];
5363 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5364 call_next = zc->zc_next;
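/*
 * A single compare-and-swap advances zc_next, so exactly one
 * thread wins the right to make this call; everyone else just
 * picks another function on the next pass.
 */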
5366 if (now >= call_next &&
5367 atomic_cas_64(&zc->zc_next, call_next, call_next +
5368 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5369 ztest_execute(rand, zi, id);
5379 ztest_dataset_name(char *dsname, char *pool, int d)
5381 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5385 ztest_dataset_destroy(int d)
5387 char name[MAXNAMELEN];
5390 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5392 if (ztest_opts.zo_verbose >= 3)
5393 (void) printf("Destroying %s to free up space\n", name);
5396 * Clean up any non-standard clones and snapshots. In general,
5397 * ztest thread t operates on dataset (t % zo_datasets),
5398 * so there may be more than one thing to clean up.
5400 for (t = d; t < ztest_opts.zo_threads;
5401 t += ztest_opts.zo_datasets)
5402 ztest_dsl_dataset_cleanup(name, t);
5404 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5405 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5409 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5411 uint64_t usedobjs, dirobjs, scratch;
5414 * ZTEST_DIROBJ is the object directory for the entire dataset.
5415 * Therefore, the number of objects in use should equal the
5416 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5417 * If not, we have an object leak.
5419 * Note that we can only check this in ztest_dataset_open(),
5420 * when the open-context and syncing-context values agree.
5421 * That's because zap_count() returns the open-context value,
5422 * while dmu_objset_space() returns the rootbp fill count.
5424 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5425 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5426 ASSERT3U(dirobjs + 1, ==, usedobjs);
5430 ztest_dataset_open(int d)
5432 ztest_ds_t *zd = &ztest_ds[d];
5433 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5436 char name[MAXNAMELEN];
5439 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5441 (void) rw_enter(&ztest_name_lock, RW_READER);
5443 error = ztest_dataset_create(name);
5444 if (error == ENOSPC) {
5445 (void) rw_exit(&ztest_name_lock);
5446 ztest_record_enospc(FTAG);
5449 ASSERT(error == 0 || error == EEXIST);
5451 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
5452 (void) rw_exit(&ztest_name_lock);
5454 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5456 zilog = zd->zd_zilog;
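/*
 * The claimed ZIL sequence number must cover every record this
 * dataset committed before the (possible) crash; anything lower
 * means committed log records were lost.
 */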
5458 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5459 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5460 fatal(0, "missing log records: claimed %llu < committed %llu",
5461 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5463 ztest_dataset_dirobj_verify(zd);
5465 zil_replay(os, zd, ztest_replay_vector);
5467 ztest_dataset_dirobj_verify(zd);
5469 if (ztest_opts.zo_verbose >= 6)
5470 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5472 (u_longlong_t)zilog->zl_parse_blk_count,
5473 (u_longlong_t)zilog->zl_parse_lr_count,
5474 (u_longlong_t)zilog->zl_replaying_seq);
5476 zilog = zil_open(os, ztest_get_data);
5478 if (zilog->zl_replaying_seq != 0 &&
5479 zilog->zl_replaying_seq < committed_seq)
5480 fatal(0, "missing log records: replayed %llu < committed %llu",
5481 zilog->zl_replaying_seq, committed_seq);
5487 ztest_dataset_close(int d)
5489 ztest_ds_t *zd = &ztest_ds[d];
5491 zil_close(zd->zd_zilog);
5492 dmu_objset_rele(zd->zd_os, zd);
5498 * Kick off threads to run tests on all datasets in parallel.
5501 ztest_run(ztest_shared_t *zs)
5506 kthread_t *resume_thread;
5511 ztest_exiting = B_FALSE;
5514 * Initialize parent/child shared state.
5516 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5517 rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);
5519 zs->zs_thread_start = gethrtime();
5520 zs->zs_thread_stop =
5521 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5522 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5523 zs->zs_thread_kill = zs->zs_thread_stop;
5524 if (ztest_random(100) < ztest_opts.zo_killrate) {
5525 zs->zs_thread_kill -=
5526 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5529 mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
5531 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5532 offsetof(ztest_cb_data_t, zcd_node));
5537 kernel_init(FREAD | FWRITE);
5538 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5539 spa->spa_debug = B_TRUE;
5542 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5543 zs->zs_guid = dmu_objset_fsid_guid(os);
5544 dmu_objset_rele(os, FTAG);
5546 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5549 * We don't expect the pool to suspend unless maxfaults == 0,
5550 * in which case ztest_fault_inject() temporarily takes away
5551 * the only valid replica.
5553 if (MAXFAULTS() == 0)
5554 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5556 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5559 * Create a thread to periodically resume suspended I/O.
5561 VERIFY3P((resume_thread = zk_thread_create(NULL, 0,
5562 (thread_func_t)ztest_resume_thread, spa, TS_RUN, NULL, 0, 0,
5563 PTHREAD_CREATE_JOINABLE)), !=, NULL);
5567 * Set a deadman alarm to abort() if we hang.
5569 signal(SIGALRM, ztest_deadman_alarm);
5570 alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE);
5574 * Verify that we can safely inquire about any object,
5575 * whether it's allocated or not. To make it interesting,
5576 * we probe a 5-wide window around each power of two.
5577 * This hits all edge cases, including zero and the max.
5579 for (t = 0; t < 64; t++) {
5580 for (d = -5; d <= 5; d++) {
5581 error = dmu_object_info(spa->spa_meta_objset,
5582 (1ULL << t) + d, NULL);
5583 ASSERT(error == 0 || error == ENOENT ||
5589 * If we got any ENOSPC errors on the previous run, destroy something.
5591 if (zs->zs_enospc_count != 0) {
5592 int d = ztest_random(ztest_opts.zo_datasets);
5593 ztest_dataset_destroy(d);
5595 zs->zs_enospc_count = 0;
5597 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (kt_did_t),
5600 if (ztest_opts.zo_verbose >= 4)
5601 (void) printf("starting main threads...\n");
5604 * Kick off all the tests that run in parallel.
5606 for (t = 0; t < ztest_opts.zo_threads; t++) {
5609 if (t < ztest_opts.zo_datasets &&
5610 ztest_dataset_open(t) != 0)
5613 VERIFY3P(thread = zk_thread_create(NULL, 0,
5614 (thread_func_t)ztest_thread,
5615 (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0,
5616 PTHREAD_CREATE_JOINABLE), !=, NULL);
5617 tid[t] = thread->t_tid;
5621 * Wait for all of the tests to complete. We go in reverse order
5622 * so we don't close datasets while threads are still using them.
5624 for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5625 thread_join(tid[t]);
5626 if (t < ztest_opts.zo_datasets)
5627 ztest_dataset_close(t);
5630 txg_wait_synced(spa_get_dsl(spa), 0);
5632 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5633 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5635 umem_free(tid, ztest_opts.zo_threads * sizeof (kt_did_t));
5637 /* Kill the resume thread */
5638 ztest_exiting = B_TRUE;
5639 thread_join(resume_thread->t_tid);
5643 * Right before closing the pool, kick off a bunch of async I/O;
5644 * spa_close() should wait for it to complete.
5646 for (object = 1; object < 50; object++)
5647 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5649 /* Verify that at least one commit cb was called in a timely fashion */
5650 if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
5651 VERIFY3U(zc_min_txg_delay, ==, 0);
5653 spa_close(spa, FTAG);
5656 * Verify that we can loop over all pools.
5658 mutex_enter(&spa_namespace_lock);
5659 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5660 if (ztest_opts.zo_verbose > 3)
5661 (void) printf("spa_next: found %s\n", spa_name(spa));
5662 mutex_exit(&spa_namespace_lock);
5665 * Verify that we can export the pool and reimport it under a
5668 if (ztest_random(2) == 0) {
5669 char name[MAXNAMELEN];
5670 (void) snprintf(name, MAXNAMELEN, "%s_import",
5671 ztest_opts.zo_pool);
5672 ztest_spa_import_export(ztest_opts.zo_pool, name);
5673 ztest_spa_import_export(name, ztest_opts.zo_pool);
5678 list_destroy(&zcl.zcl_callbacks);
5679 mutex_destroy(&zcl.zcl_callbacks_lock);
5680 rw_destroy(&ztest_name_lock);
5681 mutex_destroy(&ztest_vdev_lock);
5687 ztest_ds_t *zd = &ztest_ds[0];
5691 if (ztest_opts.zo_verbose >= 3)
5692 (void) printf("testing spa_freeze()...\n");
5694 kernel_init(FREAD | FWRITE);
5695 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5696 VERIFY3U(0, ==, ztest_dataset_open(0));
5699 * Force the first log block to be transactionally allocated.
5700 * We have to do this before we freeze the pool -- otherwise
5701 * the log chain won't be anchored.
5703 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5704 ztest_dmu_object_alloc_free(zd, 0);
5705 zil_commit(zd->zd_zilog, 0);
5708 txg_wait_synced(spa_get_dsl(spa), 0);
5711 * Freeze the pool. This stops spa_sync() from doing anything,
5712 * so that the only way to record changes from now on is the ZIL.
5717 * Run tests that generate log records but don't alter the pool config
5718 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5719 * We do a txg_wait_synced() after each iteration to force the txg
5720 * to increase well beyond the last synced value in the uberblock.
5721 * The ZIL should be OK with that.
5723 while (ztest_random(10) != 0 &&
5724 numloops++ < ztest_opts.zo_maxloops) {
5725 ztest_dmu_write_parallel(zd, 0);
5726 ztest_dmu_object_alloc_free(zd, 0);
5727 txg_wait_synced(spa_get_dsl(spa), 0);
5731 * Commit all of the changes we just generated.
5733 zil_commit(zd->zd_zilog, 0);
5734 txg_wait_synced(spa_get_dsl(spa), 0);
5737 * Close our dataset and close the pool.
5739 ztest_dataset_close(0);
5740 spa_close(spa, FTAG);
5744 * Open and close the pool and dataset to induce log replay.
5746 kernel_init(FREAD | FWRITE);
5747 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5748 ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
5749 VERIFY3U(0, ==, ztest_dataset_open(0));
5750 ztest_dataset_close(0);
5752 spa->spa_debug = B_TRUE;
5754 txg_wait_synced(spa_get_dsl(spa), 0);
5755 ztest_reguid(NULL, 0);
5757 spa_close(spa, FTAG);
5762 print_time(hrtime_t t, char *timebuf)
5764 hrtime_t s = t / NANOSEC;
5765 hrtime_t m = s / 60;
5766 hrtime_t h = m / 60;
5767 hrtime_t d = h / 24;
5776 (void) sprintf(timebuf,
5777 "%llud%02lluh%02llum%02llus", d, h, m, s);
5779 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5781 (void) sprintf(timebuf, "%llum%02llus", m, s);
5783 (void) sprintf(timebuf, "%llus", s);
5787 make_random_props(void)
5791 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5792 if (ztest_random(2) == 0)
5794 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5800 * Create a storage pool with the given name and initial vdev size.
5801 * Then test spa_freeze() functionality.
5804 ztest_init(ztest_shared_t *zs)
5807 nvlist_t *nvroot, *props;
5810 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5811 rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);
5813 kernel_init(FREAD | FWRITE);
5816 * Create the storage pool.
5818 (void) spa_destroy(ztest_opts.zo_pool);
5819 ztest_shared->zs_vdev_next_leaf = 0;
5821 zs->zs_mirrors = ztest_opts.zo_mirrors;
5822 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5823 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5824 props = make_random_props();
5825 for (i = 0; i < SPA_FEATURES; i++) {
5827 VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
5828 spa_feature_table[i].fi_uname));
5829 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
5832 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
5834 nvlist_free(nvroot);
5836 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5837 zs->zs_metaslab_sz =
5838 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5839 spa_close(spa, FTAG);
5843 ztest_run_zdb(ztest_opts.zo_pool);
5847 ztest_run_zdb(ztest_opts.zo_pool);
5849 rw_destroy(&ztest_name_lock);
5850 mutex_destroy(&ztest_vdev_lock);
5856 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
5858 ztest_fd_data = mkstemp(ztest_name_data);
5859 ASSERT3S(ztest_fd_data, >=, 0);
5860 (void) unlink(ztest_name_data);
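/*
 * The shared file is a header followed by the options block, the
 * shared state, one call-state slot per test function, and one slot
 * per dataset; its total size is derived from the sizes and counts
 * recorded in the header.
 */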
5864 shared_data_size(ztest_shared_hdr_t *hdr)
5868 size = hdr->zh_hdr_size;
5869 size += hdr->zh_opts_size;
5870 size += hdr->zh_size;
5871 size += hdr->zh_stats_size * hdr->zh_stats_count;
5872 size += hdr->zh_ds_size * hdr->zh_ds_count;
5881 ztest_shared_hdr_t *hdr;
5883 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5884 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5885 ASSERT(hdr != MAP_FAILED);
5887 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
5889 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5890 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5891 hdr->zh_size = sizeof (ztest_shared_t);
5892 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5893 hdr->zh_stats_count = ZTEST_FUNCS;
5894 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5895 hdr->zh_ds_count = ztest_opts.zo_datasets;
5897 size = shared_data_size(hdr);
5898 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
5900 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5907 ztest_shared_hdr_t *hdr;
5910 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5911 PROT_READ, MAP_SHARED, ztest_fd_data, 0);
5912 ASSERT(hdr != MAP_FAILED);
5914 size = shared_data_size(hdr);
5916 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5917 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5918 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5919 ASSERT(hdr != MAP_FAILED);
5920 buf = (uint8_t *)hdr;
5922 offset = hdr->zh_hdr_size;
5923 ztest_shared_opts = (void *)&buf[offset];
5924 offset += hdr->zh_opts_size;
5925 ztest_shared = (void *)&buf[offset];
5926 offset += hdr->zh_size;
5927 ztest_shared_callstate = (void *)&buf[offset];
5928 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5929 ztest_shared_ds = (void *)&buf[offset];
5933 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5937 char *cmdbuf = NULL;
5942 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
5943 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
5948 fatal(1, "fork failed");
5950 if (pid == 0) { /* child */
5951 char *emptyargv[2] = { cmd, NULL };
5952 char fd_data_str[12];
5954 struct rlimit rl = { 1024, 1024 };
5955 (void) setrlimit(RLIMIT_NOFILE, &rl);
5957 (void) close(ztest_fd_rand);
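/*
 * The shared data fd survives the exec; export its number in
 * ZTEST_FD_DATA so the child can remap the shared state.
 */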
5958 VERIFY(11 >= snprintf(fd_data_str, 12, "%d", ztest_fd_data));
5959 VERIFY(0 == setenv("ZTEST_FD_DATA", fd_data_str, 1));
5961 (void) enable_extended_FILE_stdio(-1, -1);
5962 if (libpath != NULL)
5963 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5964 (void) execv(cmd, emptyargv);
5965 ztest_dump_core = B_FALSE;
5966 fatal(B_TRUE, "exec failed: %s", cmd);
5969 if (cmdbuf != NULL) {
5970 umem_free(cmdbuf, MAXPATHLEN);
5974 while (waitpid(pid, &status, 0) != pid)
5976 if (statusp != NULL)
5979 if (WIFEXITED(status)) {
5980 if (WEXITSTATUS(status) != 0) {
5981 (void) fprintf(stderr, "child exited with code %d\n",
5982 WEXITSTATUS(status));
5986 } else if (WIFSIGNALED(status)) {
5987 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5988 (void) fprintf(stderr, "child died with signal %d\n",
5994 (void) fprintf(stderr, "something strange happened to child\n");
6001 ztest_run_init(void)
6005 ztest_shared_t *zs = ztest_shared;
6007 ASSERT(ztest_opts.zo_init != 0);
6010 * Blow away any existing copy of zpool.cache
6012 (void) remove(spa_config_path);
6015 * Create and initialize our storage pool.
6017 for (i = 1; i <= ztest_opts.zo_init; i++) {
6018 bzero(zs, sizeof (ztest_shared_t));
6019 if (ztest_opts.zo_verbose >= 3 &&
6020 ztest_opts.zo_init != 1) {
6021 (void) printf("ztest_init(), pass %d\n", i);
6028 main(int argc, char **argv)
6036 ztest_shared_callstate_t *zc;
6043 char *fd_data_str = getenv("ZTEST_FD_DATA");
6045 (void) setvbuf(stdout, NULL, _IOLBF, 0);
6047 ztest_fd_rand = open("/dev/urandom", O_RDONLY);
6048 ASSERT3S(ztest_fd_rand, >=, 0);
6051 dprintf_setup(&argc, argv);
6052 process_options(argc, argv);
6057 bcopy(&ztest_opts, ztest_shared_opts,
6058 sizeof (*ztest_shared_opts));
6060 ztest_fd_data = atoi(fd_data_str);
6062 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
6064 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
6066 /* Override location of zpool.cache */
6067 VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache",
6068 ztest_opts.zo_dir) != -1);
6070 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
6075 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
6076 metaslab_df_alloc_threshold =
6077 zs->zs_metaslab_df_alloc_threshold;
6086 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
6088 if (ztest_opts.zo_verbose >= 1) {
6089 (void) printf("%llu vdevs, %d datasets, %d threads,"
6090 " %llu seconds...\n",
6091 (u_longlong_t)ztest_opts.zo_vdevs,
6092 ztest_opts.zo_datasets,
6093 ztest_opts.zo_threads,
6094 (u_longlong_t)ztest_opts.zo_time);
6097 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
6098 (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
6100 zs->zs_do_init = B_TRUE;
6101 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
6102 if (ztest_opts.zo_verbose >= 1) {
6103 (void) printf("Executing older ztest for "
6104 "initialization: %s\n", ztest_opts.zo_alt_ztest);
6106 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
6107 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
6109 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
6111 zs->zs_do_init = B_FALSE;
6113 zs->zs_proc_start = gethrtime();
6114 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
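/*
 * Seed each function's next-call time: tests whose interval cannot
 * fit before zs_proc_stop are disabled, and the rest start at a
 * random point within twice their interval so the periodic tests
 * don't all fire at once.
 */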
6116 for (f = 0; f < ZTEST_FUNCS; f++) {
6117 zi = &ztest_info[f];
6118 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6119 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
6120 zc->zc_next = UINT64_MAX;
6122 zc->zc_next = zs->zs_proc_start +
6123 ztest_random(2 * zi->zi_interval[0] + 1);
6127 * Run the tests in a loop. These tests include fault injection
6128 * to verify that self-healing data works, and forced crashes
6129 * to verify that we never lose on-disk consistency.
6131 while (gethrtime() < zs->zs_proc_stop) {
6136 * Initialize the workload counters for each function.
6138 for (f = 0; f < ZTEST_FUNCS; f++) {
6139 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6144 /* Set the allocation switch size */
6145 zs->zs_metaslab_df_alloc_threshold =
6146 ztest_random(zs->zs_metaslab_sz / 4) + 1;
6148 if (!hasalt || ztest_random(2) == 0) {
6149 if (hasalt && ztest_opts.zo_verbose >= 1) {
6150 (void) printf("Executing newer ztest: %s\n",
6154 killed = exec_child(cmd, NULL, B_TRUE, &status);
6156 if (hasalt && ztest_opts.zo_verbose >= 1) {
6157 (void) printf("Executing older ztest: %s\n",
6158 ztest_opts.zo_alt_ztest);
6161 killed = exec_child(ztest_opts.zo_alt_ztest,
6162 ztest_opts.zo_alt_libpath, B_TRUE, &status);
6169 if (ztest_opts.zo_verbose >= 1) {
6170 hrtime_t now = gethrtime();
6172 now = MIN(now, zs->zs_proc_stop);
6173 print_time(zs->zs_proc_stop - now, timebuf);
6174 nicenum(zs->zs_space, numbuf);
6176 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
6177 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
6179 WIFEXITED(status) ? "Complete" : "SIGKILL",
6180 (u_longlong_t)zs->zs_enospc_count,
6181 100.0 * zs->zs_alloc / zs->zs_space,
6183 100.0 * (now - zs->zs_proc_start) /
6184 (ztest_opts.zo_time * NANOSEC), timebuf);
6187 if (ztest_opts.zo_verbose >= 2) {
6188 (void) printf("\nWorkload summary:\n\n");
6189 (void) printf("%7s %9s %s\n",
6190 "Calls", "Time", "Function");
6191 (void) printf("%7s %9s %s\n",
6192 "-----", "----", "--------");
6193 for (f = 0; f < ZTEST_FUNCS; f++) {
6196 zi = &ztest_info[f];
6197 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6198 print_time(zc->zc_time, timebuf);
6199 (void) dladdr((void *)zi->zi_func, &dli);
6200 (void) printf("%7llu %9s %s\n",
6201 (u_longlong_t)zc->zc_count, timebuf,
6204 (void) printf("\n");
6208 * It's possible that we killed a child during a rename test,
6209 * in which case we'll have a 'ztest_tmp' pool lying around
6210 * instead of 'ztest'. Do a blind rename in case this happened.
6213 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
6214 spa_close(spa, FTAG);
6216 char tmpname[MAXNAMELEN];
6218 kernel_init(FREAD | FWRITE);
6219 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
6220 ztest_opts.zo_pool);
6221 (void) spa_rename(tmpname, ztest_opts.zo_pool);
6225 ztest_run_zdb(ztest_opts.zo_pool);
6228 if (ztest_opts.zo_verbose >= 1) {
6230 (void) printf("%d runs of older ztest: %s\n", older,
6231 ztest_opts.zo_alt_ztest);
6232 (void) printf("%d runs of newer ztest: %s\n", newer,
6235 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6236 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
6239 umem_free(cmd, MAXNAMELEN);