X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=cmd%2Fztest%2Fztest.c;h=09d6e9526aa306a27f1fab8f78763c50eb9250d3;hb=d13524579162b35189804c357a63993be758b84c;hp=771f1427e37a4e0d595d7edce245932ab5db2cbb;hpb=b8864a233c569edcc57c686f3ea8cd1ae3b89153;p=zfs.git diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index 771f142..09d6e95 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -20,6 +20,8 @@ */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright 2011 Nexenta Systems, Inc. All rights reserved. */ /* @@ -57,6 +59,9 @@ * the transaction group number is less than the current, open txg. * If you add a new test, please do this if applicable. * + * (7) Threads are created with a reduced stack size, for sanity checking. + * Therefore, it's important not to allocate huge buffers on the stack. + * * When run with no arguments, ztest runs for about five minutes and * produces no output if successful. To get a little bit of information, * specify -V. To get more information, specify -VV, and so on. @@ -168,8 +173,8 @@ typedef enum { typedef struct rll { void *rll_writer; int rll_readers; - mutex_t rll_lock; - cond_t rll_cv; + kmutex_t rll_lock; + kcondvar_t rll_cv; } rll_t; typedef struct rl { @@ -202,11 +207,12 @@ typedef struct ztest_od { */ typedef struct ztest_ds { objset_t *zd_os; + krwlock_t zd_zilog_lock; zilog_t *zd_zilog; uint64_t zd_seq; ztest_od_t *zd_od; /* debugging aid */ char zd_name[MAXNAMELEN]; - mutex_t zd_dirobj_lock; + kmutex_t zd_dirobj_lock; rll_t zd_object_lock[ZTEST_OBJECT_LOCKS]; rll_t zd_range_lock[ZTEST_RANGE_LOCKS]; } ztest_ds_t; @@ -235,6 +241,7 @@ ztest_func_t ztest_dmu_commit_callbacks; ztest_func_t ztest_zap; ztest_func_t ztest_zap_parallel; ztest_func_t ztest_zil_commit; +ztest_func_t ztest_zil_remount; ztest_func_t ztest_dmu_read_write_zcopy; ztest_func_t ztest_dmu_objset_create_destroy; ztest_func_t ztest_dmu_prealloc; @@ -254,6 +261,7 @@ ztest_func_t ztest_vdev_LUN_growth; ztest_func_t ztest_vdev_add_remove; ztest_func_t ztest_vdev_aux_add_remove; ztest_func_t ztest_split_pool; +ztest_func_t ztest_reguid; uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ @@ -270,6 +278,7 @@ ztest_info_t ztest_info[] = { { ztest_zap_parallel, 100, &zopt_always }, { ztest_split_pool, 1, &zopt_always }, { ztest_zil_commit, 1, &zopt_incessant }, + { ztest_zil_remount, 1, &zopt_sometimes }, { ztest_dmu_read_write_zcopy, 1, &zopt_often }, { ztest_dmu_objset_create_destroy, 1, &zopt_often }, { ztest_dsl_prop_get_set, 1, &zopt_often }, @@ -283,6 +292,13 @@ ztest_info_t ztest_info[] = { { ztest_fault_inject, 1, &zopt_sometimes }, { ztest_ddt_repair, 1, &zopt_sometimes }, { ztest_dmu_snapshot_hold, 1, &zopt_sometimes }, + /* + * The reguid test is currently broken. Disable it until + * we get around to fixing it. + */ +#if 0 + { ztest_reguid, 1, &zopt_sometimes }, +#endif { ztest_spa_rename, 1, &zopt_rarely }, { ztest_scrub, 1, &zopt_rarely }, { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely }, @@ -299,8 +315,8 @@ ztest_info_t ztest_info[] = { * The callbacks are ordered by txg number. 
*/ typedef struct ztest_cb_list { - mutex_t zcl_callbacks_lock; - list_t zcl_callbacks; + kmutex_t zcl_callbacks_lock; + list_t zcl_callbacks; } ztest_cb_list_t; /* @@ -319,8 +335,9 @@ typedef struct ztest_shared { uint64_t zs_vdev_aux; uint64_t zs_alloc; uint64_t zs_space; - mutex_t zs_vdev_lock; - rwlock_t zs_name_lock; + uint64_t zs_guid; + kmutex_t zs_vdev_lock; + krwlock_t zs_name_lock; ztest_info_t zs_info[ZTEST_FUNCS]; uint64_t zs_splits; uint64_t zs_mirrors; @@ -341,6 +358,22 @@ static boolean_t ztest_exiting; /* Global commit callback list */ static ztest_cb_list_t zcl; +/* Commit cb delay */ +static uint64_t zc_min_txg_delay = UINT64_MAX; +static int zc_cb_counter = 0; + +/* + * Minimum number of commit callbacks that need to be registered for us to check + * whether the minimum txg delay is acceptable. + */ +#define ZTEST_COMMIT_CB_MIN_REG 100 + +/* + * If a number of txgs equal to this threshold have been created after a commit + * callback has been registered but not called, then we assume there is an + * implementation bug. + */ +#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000) extern uint64_t metaslab_gang_bang; extern uint64_t metaslab_df_alloc_threshold; @@ -359,7 +392,7 @@ static void usage(boolean_t) __NORETURN; * debugging facilities. */ const char * -_umem_debug_init() +_umem_debug_init(void) { return ("default,verbose"); /* $UMEM_DEBUG setting */ } @@ -379,9 +412,10 @@ fatal(int do_perror, char *message, ...) { va_list args; int save_errno = errno; - char buf[FATAL_MSG_SZ]; + char *buf; (void) fflush(stdout); + buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL); va_start(args, message); (void) sprintf(buf, "ztest: "); @@ -646,10 +680,12 @@ ztest_get_ashift(void) static nvlist_t * make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift) { - char pathbuf[MAXPATHLEN]; + char *pathbuf; uint64_t vdev; nvlist_t *file; + pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + if (ashift == 0) ashift = ztest_get_ashift(); @@ -680,6 +716,7 @@ make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift) VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); + umem_free(pathbuf, MAXPATHLEN); return (file); } @@ -822,7 +859,7 @@ ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, { const char *propname = zfs_prop_to_name(prop); const char *valname; - char setpoint[MAXPATHLEN]; + char *setpoint; uint64_t curval; int error; @@ -836,6 +873,7 @@ ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, } ASSERT3U(error, ==, 0); + setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval), 1, &curval, setpoint), ==, 0); @@ -844,6 +882,7 @@ ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value, (void) printf("%s %s = %s at '%s'\n", osname, propname, valname, setpoint); } + umem_free(setpoint, MAXPATHLEN); return (error); } @@ -876,8 +915,8 @@ ztest_rll_init(rll_t *rll) { rll->rll_writer = NULL; rll->rll_readers = 0; - VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); - VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); + mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL); } static void @@ -885,32 +924,32 @@ ztest_rll_destroy(rll_t *rll) { ASSERT(rll->rll_writer == NULL); ASSERT(rll->rll_readers == 0); - VERIFY(_mutex_destroy(&rll->rll_lock) == 0); - 
VERIFY(cond_destroy(&rll->rll_cv) == 0); + mutex_destroy(&rll->rll_lock); + cv_destroy(&rll->rll_cv); } static void ztest_rll_lock(rll_t *rll, rl_type_t type) { - VERIFY(mutex_lock(&rll->rll_lock) == 0); + mutex_enter(&rll->rll_lock); if (type == RL_READER) { while (rll->rll_writer != NULL) - (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + (void) cv_wait(&rll->rll_cv, &rll->rll_lock); rll->rll_readers++; } else { while (rll->rll_writer != NULL || rll->rll_readers) - (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + (void) cv_wait(&rll->rll_cv, &rll->rll_lock); rll->rll_writer = curthread; } - VERIFY(mutex_unlock(&rll->rll_lock) == 0); + mutex_exit(&rll->rll_lock); } static void ztest_rll_unlock(rll_t *rll) { - VERIFY(mutex_lock(&rll->rll_lock) == 0); + mutex_enter(&rll->rll_lock); if (rll->rll_writer) { ASSERT(rll->rll_readers == 0); @@ -922,9 +961,9 @@ ztest_rll_unlock(rll_t *rll) } if (rll->rll_writer == NULL && rll->rll_readers == 0) - VERIFY(cond_broadcast(&rll->rll_cv) == 0); + cv_broadcast(&rll->rll_cv); - VERIFY(mutex_unlock(&rll->rll_lock) == 0); + mutex_exit(&rll->rll_lock); } static void @@ -981,7 +1020,8 @@ ztest_zd_init(ztest_ds_t *zd, objset_t *os) dmu_objset_name(os, zd->zd_name); int l; - VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); + rw_init(&zd->zd_zilog_lock, NULL, RW_DEFAULT, NULL); + mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL); for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) ztest_rll_init(&zd->zd_object_lock[l]); @@ -995,7 +1035,8 @@ ztest_zd_fini(ztest_ds_t *zd) { int l; - VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); + mutex_destroy(&zd->zd_dirobj_lock); + rw_destroy(&zd->zd_zilog_lock); for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) ztest_rll_destroy(&zd->zd_object_lock[l]); @@ -1042,6 +1083,7 @@ ztest_pattern_set(void *buf, uint64_t size, uint64_t value) *ip++ = value; } +#ifndef NDEBUG static boolean_t ztest_pattern_match(void *buf, uint64_t size, uint64_t value) { @@ -1054,6 +1096,7 @@ ztest_pattern_match(void *buf, uint64_t size, uint64_t value) return (diff == 0); } +#endif static void ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, @@ -1579,26 +1622,26 @@ ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) } zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { - NULL, /* 0 no such transaction type */ - ztest_replay_create, /* TX_CREATE */ - NULL, /* TX_MKDIR */ - NULL, /* TX_MKXATTR */ - NULL, /* TX_SYMLINK */ - ztest_replay_remove, /* TX_REMOVE */ - NULL, /* TX_RMDIR */ - NULL, /* TX_LINK */ - NULL, /* TX_RENAME */ - ztest_replay_write, /* TX_WRITE */ - ztest_replay_truncate, /* TX_TRUNCATE */ - ztest_replay_setattr, /* TX_SETATTR */ - NULL, /* TX_ACL */ - NULL, /* TX_CREATE_ACL */ - NULL, /* TX_CREATE_ATTR */ - NULL, /* TX_CREATE_ACL_ATTR */ - NULL, /* TX_MKDIR_ACL */ - NULL, /* TX_MKDIR_ATTR */ - NULL, /* TX_MKDIR_ACL_ATTR */ - NULL, /* TX_WRITE2 */ + NULL, /* 0 no such transaction type */ + (zil_replay_func_t *)ztest_replay_create, /* TX_CREATE */ + NULL, /* TX_MKDIR */ + NULL, /* TX_MKXATTR */ + NULL, /* TX_SYMLINK */ + (zil_replay_func_t *)ztest_replay_remove, /* TX_REMOVE */ + NULL, /* TX_RMDIR */ + NULL, /* TX_LINK */ + NULL, /* TX_RENAME */ + (zil_replay_func_t *)ztest_replay_write, /* TX_WRITE */ + (zil_replay_func_t *)ztest_replay_truncate, /* TX_TRUNCATE */ + (zil_replay_func_t *)ztest_replay_setattr, /* TX_SETATTR */ + NULL, /* TX_ACL */ + NULL, /* TX_CREATE_ACL */ + NULL, /* TX_CREATE_ATTR */ + NULL, /* TX_CREATE_ACL_ATTR */ + NULL, /* TX_MKDIR_ACL */ + NULL, /* 
TX_MKDIR_ATTR */ + NULL, /* TX_MKDIR_ACL_ATTR */ + NULL, /* TX_WRITE2 */ }; /* @@ -1736,7 +1779,7 @@ ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) int error; int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); for (i = 0; i < count; i++, od++) { od->od_object = 0; @@ -1777,7 +1820,7 @@ ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) int missing = 0; int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); for (i = 0; i < count; i++, od++) { if (missing) { @@ -1823,7 +1866,7 @@ ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) int error; int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); od += count - 1; @@ -1965,6 +2008,8 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) if (ztest_random(2) == 0) io_type = ZTEST_IO_WRITE_TAG; + (void) rw_enter(&zd->zd_zilog_lock, RW_READER); + switch (io_type) { case ZTEST_IO_WRITE_TAG: @@ -1998,8 +2043,12 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) case ZTEST_IO_SETATTR: (void) ztest_setattr(zd, object); break; + default: + break; } + (void) rw_exit(&zd->zd_zilog_lock); + umem_free(data, blocksize); } @@ -2037,13 +2086,13 @@ ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) int count = size / sizeof (*od); int rv = 0; - VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); + mutex_enter(&zd->zd_dirobj_lock); if ((ztest_lookup(zd, od, count) != 0 || remove) && (ztest_remove(zd, od, count) != 0 || ztest_create(zd, od, count) != 0)) rv = -1; zd->zd_od = od; - VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); + mutex_exit(&zd->zd_dirobj_lock); return (rv); } @@ -2054,6 +2103,8 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id) { zilog_t *zilog = zd->zd_zilog; + (void) rw_enter(&zd->zd_zilog_lock, RW_READER); + zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); /* @@ -2065,6 +2116,31 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id) ASSERT(zd->zd_seq <= zilog->zl_commit_lr_seq); zd->zd_seq = zilog->zl_commit_lr_seq; mutex_exit(&zilog->zl_lock); + + (void) rw_exit(&zd->zd_zilog_lock); +} + +/* + * This function is designed to simulate the operations that occur during a + * mount/unmount operation. We hold the dataset across these operations in an + * attempt to expose any implicit assumptions about ZIL management. + */ +/* ARGSUSED */ +void +ztest_zil_remount(ztest_ds_t *zd, uint64_t id) +{ + objset_t *os = zd->zd_os; + + (void) rw_enter(&zd->zd_zilog_lock, RW_WRITER); + + /* zfs_sb_teardown() */ + zil_close(zd->zd_zilog); + + /* zfsvfs_setup() */ + VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); + zil_replay(os, zd, ztest_replay_vector); + + (void) rw_exit(&zd->zd_zilog_lock); } /* @@ -2099,7 +2175,7 @@ ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) * Attempt to create an existing pool. It shouldn't matter * what's in the nvroot; we should fail with EEXIST. 
*/ - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL)); nvlist_free(nvroot); @@ -2107,7 +2183,7 @@ ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool)); spa_close(spa, FTAG); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } static vdev_t * @@ -2161,7 +2237,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) nvlist_t *nvroot; int error; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2187,9 +2263,9 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) * dmu_objset_destroy() to fail with EBUSY thus * leaving the dataset in an inconsistent state. */ - VERIFY(rw_wrlock(&ztest_shared->zs_name_lock) == 0); + rw_enter(&ztest_shared->zs_name_lock, RW_WRITER); error = spa_vdev_remove(spa, guid, B_FALSE); - VERIFY(rw_unlock(&ztest_shared->zs_name_lock) == 0); + rw_exit(&ztest_shared->zs_name_lock); if (error && error != EEXIST) fatal(0, "spa_vdev_remove() = %d", error); @@ -2211,7 +2287,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) fatal(0, "spa_vdev_add() = %d", error); } - VERIFY(mutex_unlock(&ztest_shared->zs_vdev_lock) == 0); + mutex_exit(&ztest_shared->zs_vdev_lock); } /* @@ -2226,9 +2302,12 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) vdev_t *rvd = spa->spa_root_vdev; spa_aux_vdev_t *sav; char *aux; + char *path; uint64_t guid = 0; int error; + path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + if (ztest_random(2) == 0) { sav = &spa->spa_spares; aux = ZPOOL_CONFIG_SPARES; @@ -2237,7 +2316,7 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) aux = ZPOOL_CONFIG_L2CACHE; } - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2252,7 +2331,6 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) */ zs->zs_vdev_aux = 0; for (;;) { - char path[MAXPATHLEN]; int c; (void) sprintf(path, ztest_aux_template, zopt_dir, zopt_pool, aux, zs->zs_vdev_aux); @@ -2293,7 +2371,9 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); } - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); + + umem_free(path, MAXPATHLEN); } /* @@ -2310,11 +2390,11 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) uint_t c, children, schildren = 0, lastlogid = 0; int error = 0; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); /* ensure we have a useable config; mirrors of raidz aren't supported */ if (zs->zs_mirrors < 3 || zopt_raidz > 1) { - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2373,9 +2453,9 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) spa_config_exit(spa, SCL_VDEV, FTAG); - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); nvlist_free(config); @@ -2388,7 +2468,7 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) ++zs->zs_splits; --zs->zs_mirrors; } - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } @@ -2410,14 +2490,17 @@ 
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) uint64_t ashift = ztest_get_ashift(); uint64_t oldguid, pguid; size_t oldsize, newsize; - char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; + char *oldpath, *newpath; int replacing; int oldvd_has_siblings = B_FALSE; int newvd_is_spare = B_FALSE; int oldvd_is_log; int error, expected_error; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + + mutex_enter(&zs->zs_vdev_lock); leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2478,8 +2561,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) if (error != 0 && error != ENODEV && error != EBUSY && error != ENOTSUP) fatal(0, "detach (%s) returned %d", oldpath, error); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); - return; + goto out; } /* @@ -2491,7 +2573,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) newvd_is_spare = B_TRUE; (void) strcpy(newpath, newvd->vdev_path); } else { - (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, + (void) snprintf(newpath, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf); if (ztest_random(2) == 0) newpath[strlen(newpath) - 1] = 'b'; @@ -2570,8 +2652,11 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) oldpath, (longlong_t)oldsize, newpath, (longlong_t)newsize, replacing, error, expected_error); } +out: + mutex_exit(&zs->zs_vdev_lock); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + umem_free(oldpath, MAXPATHLEN); + umem_free(newpath, MAXPATHLEN); } /* @@ -2580,7 +2665,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) vdev_t * grow_vdev(vdev_t *vd, void *arg) { - spa_t *spa = vd->vdev_spa; + ASSERTV(spa_t *spa = vd->vdev_spa); size_t *newsize = arg; size_t fsize; int fd; @@ -2592,7 +2677,7 @@ grow_vdev(vdev_t *vd, void *arg) return (vd); fsize = lseek(fd, 0, SEEK_END); - (void) ftruncate(fd, *newsize); + VERIFY(ftruncate(fd, *newsize) == 0); if (zopt_verbose >= 6) { (void) printf("%s grew from %lu to %lu bytes\n", @@ -2702,7 +2787,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) uint64_t top; uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); spa_config_enter(spa, SCL_STATE, spa, RW_READER); top = ztest_random_vdev_top(spa, B_TRUE); @@ -2730,7 +2815,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) if (tvd->vdev_state != VDEV_STATE_HEALTHY || psize == 0 || psize >= 4 * zopt_vdev_size) { spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } ASSERT(psize > 0); @@ -2755,7 +2840,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) "the vdev configuration changed.\n"); } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2789,7 +2874,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) "intervening vdev offline or remove.\n"); } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2817,7 +2902,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } /* @@ -2844,7 +2929,8 @@ ztest_dataset_create(char *dsname) if (err || zilset < 80) return (err); - (void) printf("Setting dataset %s to 
sync always\n", dsname); + if (zopt_verbose >= 5) + (void) printf("Setting dataset %s to sync always\n", dsname); return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, ZFS_SYNC_ALWAYS, B_FALSE)); } @@ -2917,15 +3003,18 @@ void ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) { ztest_shared_t *zs = ztest_shared; - ztest_ds_t zdtmp; + ztest_ds_t *zdtmp; int iters; int error; objset_t *os, *os2; - char name[MAXNAMELEN]; + char *name; zilog_t *zilog; int i; - (void) rw_rdlock(&zs->zs_name_lock); + zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL); + name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", zs->zs_pool, (u_longlong_t)id); @@ -2937,9 +3026,9 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) */ if (ztest_random(2) == 0 && dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) { - ztest_zd_init(&zdtmp, os); - zil_replay(os, &zdtmp, ztest_replay_vector); - ztest_zd_fini(&zdtmp); + ztest_zd_init(zdtmp, os); + zil_replay(os, zdtmp, ztest_replay_vector); + ztest_zd_fini(zdtmp); dmu_objset_disown(os, FTAG); } @@ -2963,8 +3052,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) if (error) { if (error == ENOSPC) { ztest_record_enospc(FTAG); - (void) rw_unlock(&zs->zs_name_lock); - return; + goto out; } fatal(0, "dmu_objset_create(%s) = %d", name, error); } @@ -2972,7 +3060,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) VERIFY3U(0, ==, dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os)); - ztest_zd_init(&zdtmp, os); + ztest_zd_init(zdtmp, os); /* * Open the intent log for it. @@ -2985,7 +3073,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) */ iters = ztest_random(5); for (i = 0; i < iters; i++) { - ztest_dmu_object_alloc_free(&zdtmp, id); + ztest_dmu_object_alloc_free(zdtmp, id); if (ztest_random(iters) == 0) (void) ztest_snapshot_create(name, i); } @@ -3010,9 +3098,12 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) zil_close(zilog); dmu_objset_disown(os, FTAG); - ztest_zd_fini(&zdtmp); + ztest_zd_fini(zdtmp); +out: + (void) rw_exit(&zs->zs_name_lock); - (void) rw_unlock(&zs->zs_name_lock); + umem_free(name, MAXNAMELEN); + umem_free(zdtmp, sizeof (ztest_ds_t)); } /* @@ -3023,10 +3114,10 @@ ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) { ztest_shared_t *zs = ztest_shared; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) ztest_snapshot_destroy(zd->zd_name, id); (void) ztest_snapshot_create(zd->zd_name, id); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -3035,13 +3126,19 @@ ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) void ztest_dsl_dataset_cleanup(char *osname, uint64_t id) { - char snap1name[MAXNAMELEN]; - char clone1name[MAXNAMELEN]; - char snap2name[MAXNAMELEN]; - char clone2name[MAXNAMELEN]; - char snap3name[MAXNAMELEN]; + char *snap1name; + char *clone1name; + char *snap2name; + char *clone2name; + char *snap3name; int error; + snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, (u_longlong_t)id); (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", @@ -3068,6 +3165,12 @@ ztest_dsl_dataset_cleanup(char *osname, 
uint64_t id) error = dmu_objset_destroy(snap1name, B_FALSE); if (error && error != ENOENT) fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error); + + umem_free(snap1name, MAXNAMELEN); + umem_free(clone1name, MAXNAMELEN); + umem_free(snap2name, MAXNAMELEN); + umem_free(clone2name, MAXNAMELEN); + umem_free(snap3name, MAXNAMELEN); } /* @@ -3079,15 +3182,21 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) ztest_shared_t *zs = ztest_shared; objset_t *clone; dsl_dataset_t *ds; - char snap1name[MAXNAMELEN]; - char clone1name[MAXNAMELEN]; - char snap2name[MAXNAMELEN]; - char clone2name[MAXNAMELEN]; - char snap3name[MAXNAMELEN]; + char *snap1name; + char *clone1name; + char *snap2name; + char *clone2name; + char *snap3name; char *osname = zd->zd_name; int error; - (void) rw_rdlock(&zs->zs_name_lock); + snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL); + + (void) rw_enter(&zs->zs_name_lock, RW_READER); ztest_dsl_dataset_cleanup(osname, id); @@ -3172,42 +3281,65 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) out: ztest_dsl_dataset_cleanup(osname, id); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); + + umem_free(snap1name, MAXNAMELEN); + umem_free(clone1name, MAXNAMELEN); + umem_free(snap2name, MAXNAMELEN); + umem_free(clone2name, MAXNAMELEN); + umem_free(snap3name, MAXNAMELEN); } +#undef OD_ARRAY_SIZE +#define OD_ARRAY_SIZE 4 + /* * Verify that dmu_object_{alloc,free} work as expected. */ void ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) { - ztest_od_t od[4]; - int batchsize = sizeof (od) / sizeof (od[0]); + ztest_od_t *od; + int batchsize; + int size; int b; + size = sizeof(ztest_od_t) * OD_ARRAY_SIZE; + od = umem_alloc(size, UMEM_NOFAIL); + batchsize = OD_ARRAY_SIZE; + for (b = 0; b < batchsize; b++) - ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); + ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); /* * Destroy the previous batch of objects, create a new batch, * and do some I/O on the new objects. */ - if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0) + if (ztest_object_init(zd, od, size, B_TRUE) != 0) return; while (ztest_random(4 * batchsize) != 0) ztest_io(zd, od[ztest_random(batchsize)].od_object, ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); + + umem_free(od, size); } +#undef OD_ARRAY_SIZE +#define OD_ARRAY_SIZE 2 + /* * Verify that dmu_{read,write} work as expected. */ void ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) { + int size; + ztest_od_t *od; + objset_t *os = zd->zd_os; - ztest_od_t od[2]; + size = sizeof(ztest_od_t) * OD_ARRAY_SIZE; + od = umem_alloc(size, UMEM_NOFAIL); dmu_tx_t *tx; int i, freeit, error; uint64_t n, s, txg; @@ -3246,11 +3378,13 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) /* * Read the directory info. If it's the first time, set things up. 
*/ - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); - ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); + ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize); + ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, size, B_FALSE) != 0) { + umem_free(od, size); return; + } bigobj = od[0].od_object; packobj = od[1].od_object; @@ -3314,6 +3448,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) if (txg == 0) { umem_free(packbuf, packsize); umem_free(bigbuf, bigsize); + umem_free(od, size); return; } @@ -3414,6 +3549,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) umem_free(packbuf, packsize); umem_free(bigbuf, bigsize); + umem_free(od, size); } void @@ -3465,14 +3601,18 @@ compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, } } +#undef OD_ARRAY_SIZE +#define OD_ARRAY_SIZE 2 + void ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) { objset_t *os = zd->zd_os; - ztest_od_t od[2]; + ztest_od_t *od; dmu_tx_t *tx; uint64_t i; int error; + int size; uint64_t n, s, txg; bufwad_t *packbuf, *bigbuf; uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; @@ -3485,6 +3625,9 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) arc_buf_t **bigbuf_arcbufs; dmu_object_info_t doi; + size = sizeof(ztest_od_t) * OD_ARRAY_SIZE; + od = umem_alloc(size, UMEM_NOFAIL); + /* * This test uses two objects, packobj and bigobj, that are always * updated together (i.e. in the same tx) so that their contents are @@ -3504,11 +3647,14 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) /* * Read the directory info. If it's the first time, set things up. */ - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); - ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); + ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); + ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize); + - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, size, B_FALSE) != 0) { + umem_free(od, size); return; + } bigobj = od[0].od_object; packobj = od[1].od_object; @@ -3594,6 +3740,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) } } umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); + umem_free(od, size); dmu_buf_rele(bonus_db, FTAG); return; } @@ -3690,13 +3837,16 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) umem_free(packbuf, packsize); umem_free(bigbuf, bigsize); umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); + umem_free(od, size); } /* ARGSUSED */ void ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) { - ztest_od_t od[1]; + ztest_od_t *od; + + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); uint64_t offset = (1ULL << (ztest_random(20) + 43)) + (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); @@ -3705,47 +3855,56 @@ ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) * to verify that parallel writes to an object -- even to the * same blocks within the object -- doesn't cause any trouble. 
*/ - ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); + ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) return; while (ztest_random(10) != 0) - ztest_io(zd, od[0].od_object, offset); + ztest_io(zd, od->od_object, offset); + + umem_free(od, sizeof(ztest_od_t)); } void ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) { - ztest_od_t od[1]; + ztest_od_t *od; uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); uint64_t count = ztest_random(20) + 1; uint64_t blocksize = ztest_random_blocksize(); void *data; - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); - if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) + ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); + + if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) { + umem_free(od, sizeof(ztest_od_t)); return; + } - if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0) + if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) { + umem_free(od, sizeof(ztest_od_t)); return; + } - ztest_prealloc(zd, od[0].od_object, offset, count * blocksize); + ztest_prealloc(zd, od->od_object, offset, count * blocksize); data = umem_zalloc(blocksize, UMEM_NOFAIL); while (ztest_random(count) != 0) { uint64_t randoff = offset + (ztest_random(count) * blocksize); - if (ztest_write(zd, od[0].od_object, randoff, blocksize, + if (ztest_write(zd, od->od_object, randoff, blocksize, data) != 0) break; while (ztest_random(4) != 0) - ztest_io(zd, od[0].od_object, randoff); + ztest_io(zd, od->od_object, randoff); } umem_free(data, blocksize); + umem_free(od, sizeof(ztest_od_t)); } /* @@ -3759,7 +3918,7 @@ void ztest_zap(ztest_ds_t *zd, uint64_t id) { objset_t *os = zd->zd_os; - ztest_od_t od[1]; + ztest_od_t *od; uint64_t object; uint64_t txg, last_txg; uint64_t value[ZTEST_ZAP_MAX_INTS]; @@ -3770,12 +3929,14 @@ ztest_zap(ztest_ds_t *zd, uint64_t id) int error; char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); + ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); - if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) - return; + if (ztest_object_init(zd, od, sizeof (ztest_od_t), + !ztest_random(2)) != 0) + goto out; - object = od[0].od_object; + object = od->od_object; /* * Generate a known hash collision, and verify that @@ -3785,7 +3946,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_zap(tx, object, B_TRUE, NULL); txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); if (txg == 0) - return; + goto out; for (i = 0; i < 2; i++) { value[i] = i; VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t), @@ -3853,7 +4014,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_zap(tx, object, B_TRUE, NULL); txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); if (txg == 0) - return; + goto out; if (last_txg > txg) fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); @@ -3878,7 +4039,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id) error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); if (error == ENOENT) - return; + goto out; ASSERT3U(error, ==, 0); @@ -3886,10 +4047,12 @@ ztest_zap(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_zap(tx, 
object, B_TRUE, NULL); txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); if (txg == 0) - return; + goto out; VERIFY3U(0, ==, zap_remove(os, object, txgname, tx)); VERIFY3U(0, ==, zap_remove(os, object, propname, tx)); dmu_tx_commit(tx); +out: + umem_free(od, sizeof(ztest_od_t)); } /* @@ -3899,16 +4062,17 @@ void ztest_fzap(ztest_ds_t *zd, uint64_t id) { objset_t *os = zd->zd_os; - ztest_od_t od[1]; + ztest_od_t *od; uint64_t object, txg; int i; - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); - - if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0) - return; + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); + ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); - object = od[0].od_object; + if (ztest_object_init(zd, od, sizeof (ztest_od_t), + !ztest_random(2)) != 0) + goto out; + object = od->od_object; /* * Add entries to this ZAP and make sure it spills over @@ -3928,12 +4092,14 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_zap(tx, object, B_TRUE, name); txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); if (txg == 0) - return; + goto out; error = zap_add(os, object, name, sizeof (uint64_t), 1, &value, tx); ASSERT(error == 0 || error == EEXIST); dmu_tx_commit(tx); } +out: + umem_free(od, sizeof(ztest_od_t)); } /* ARGSUSED */ @@ -3941,7 +4107,7 @@ void ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) { objset_t *os = zd->zd_os; - ztest_od_t od[1]; + ztest_od_t *od; uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; dmu_tx_t *tx; int i, namelen, error; @@ -3949,12 +4115,15 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) char name[20], string_value[20]; void *data; - ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); + ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0); - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) { + umem_free(od, sizeof(ztest_od_t)); return; + } - object = od[0].od_object; + object = od->od_object; /* * Generate a random name of the form 'xxx.....' where each @@ -4043,6 +4212,8 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) if (tx != NULL) dmu_tx_commit(tx); + + umem_free(od, sizeof(ztest_od_t)); } /* @@ -4088,18 +4259,20 @@ ztest_commit_callback(void *arg, int error) return; } - /* Was this callback added to the global callback list? */ - if (!data->zcd_added) - goto out; - + ASSERT(data->zcd_added); ASSERT3U(data->zcd_txg, !=, 0); + (void) mutex_enter(&zcl.zcl_callbacks_lock); + + /* See if this cb was called more quickly */ + if ((synced_txg - data->zcd_txg) < zc_min_txg_delay) + zc_min_txg_delay = synced_txg - data->zcd_txg; + /* Remove our callback from the list */ - (void) mutex_lock(&zcl.zcl_callbacks_lock); list_remove(&zcl.zcl_callbacks, data); - (void) mutex_unlock(&zcl.zcl_callbacks_lock); -out: + (void) mutex_exit(&zcl.zcl_callbacks_lock); + umem_free(data, sizeof (ztest_cb_data_t)); } @@ -4113,41 +4286,38 @@ ztest_create_cb_data(objset_t *os, uint64_t txg) cb_data->zcd_txg = txg; cb_data->zcd_spa = dmu_objset_spa(os); + list_link_init(&cb_data->zcd_node); return (cb_data); } /* - * If a number of txgs equal to this threshold have been created after a commit - * callback has been registered but not called, then we assume there is an - * implementation bug. - */ -#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) - -/* * Commit callback test. 
*/ void ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) { objset_t *os = zd->zd_os; - ztest_od_t od[1]; + ztest_od_t *od; dmu_tx_t *tx; ztest_cb_data_t *cb_data[3], *tmp_cb; uint64_t old_txg, txg; - int i, error; + int i, error = 0; - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); + ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) { + umem_free(od, sizeof(ztest_od_t)); return; + } tx = dmu_tx_create(os); cb_data[0] = ztest_create_cb_data(os, 0); dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); - dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t)); + dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t)); /* Every once in a while, abort the transaction on purpose */ if (ztest_random(100) == 0) @@ -4181,6 +4351,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) umem_free(cb_data[i], sizeof (ztest_cb_data_t)); } + umem_free(od, sizeof(ztest_od_t)); return; } @@ -4190,16 +4361,16 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) /* * Read existing data to make sure there isn't a future leak. */ - VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t), + VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t), &old_txg, DMU_READ_PREFETCH)); if (old_txg > txg) fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64, old_txg, txg); - dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); + dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx); - (void) mutex_lock(&zcl.zcl_callbacks_lock); + (void) mutex_enter(&zcl.zcl_callbacks_lock); /* * Since commit callbacks don't have any ordering requirement and since @@ -4215,7 +4386,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) */ tmp_cb = list_head(&zcl.zcl_callbacks); if (tmp_cb != NULL && - tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) { + tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) { fatal(0, "Commit callback threshold exceeded, oldest txg: %" PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); } @@ -4246,9 +4417,13 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) tmp_cb = cb_data[i]; } - (void) mutex_unlock(&zcl.zcl_callbacks_lock); + zc_cb_counter += 3; + + (void) mutex_exit(&zcl.zcl_callbacks_lock); dmu_tx_commit(tx); + + umem_free(od, sizeof(ztest_od_t)); } /* ARGSUSED */ @@ -4264,13 +4439,13 @@ ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) ztest_shared_t *zs = ztest_shared; int p; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* ARGSUSED */ @@ -4280,7 +4455,7 @@ ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) ztest_shared_t *zs = ztest_shared; nvlist_t *props = NULL; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) ztest_spa_prop_set_uint64(zs, ZPOOL_PROP_DEDUPDITTO, ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); @@ -4292,7 +4467,7 @@ ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) nvlist_free(props); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -4310,14 +4485,14 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, 
uint64_t id) char tag[100]; char osname[MAXNAMELEN]; - (void) rw_rdlock(&ztest_shared->zs_name_lock); + (void) rw_enter(&ztest_shared->zs_name_lock, RW_READER); dmu_objset_name(os, osname); - (void) snprintf(snapname, 100, "sh1_%llu", id); + (void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id); (void) snprintf(fullname, 100, "%s@%s", osname, snapname); - (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id); - (void) snprintf(tag, 100, "%tag_%llu", id); + (void) snprintf(clonename, 100, "%s/ch1_%llu",osname,(u_longlong_t)id); + (void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id); /* * Clean up from any previous run. @@ -4407,7 +4582,7 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT); out: - (void) rw_unlock(&ztest_shared->zs_name_lock); + (void) rw_exit(&ztest_shared->zs_name_lock); } /* @@ -4424,8 +4599,8 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) uint64_t leaves; uint64_t bad = 0x1990c0ffeedecadeull; uint64_t top, leaf; - char path0[MAXPATHLEN]; - char pathrand[MAXPATHLEN]; + char *path0; + char *pathrand; size_t fsize; int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ int iters = 1000; @@ -4435,11 +4610,14 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) uint64_t guid0 = 0; boolean_t islog = B_FALSE; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL); + + mutex_enter(&zs->zs_vdev_lock); maxfaults = MAXFAULTS(); leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz; mirror_save = zs->zs_mirrors; - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); ASSERT(leaves >= 1); @@ -4461,9 +4639,9 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) * write failures and random online/offline activity on leaf 0, * and we'll write random garbage to the randomly chosen leaf. */ - (void) snprintf(path0, sizeof (path0), ztest_dev_template, + (void) snprintf(path0, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + zs->zs_splits); - (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, + (void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf); vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); @@ -4501,7 +4679,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) if (sav->sav_count == 0) { spa_config_exit(spa, SCL_STATE, FTAG); - return; + goto out; } vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; guid0 = vd0->vdev_guid; @@ -4533,19 +4711,20 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) * leaving the dataset in an inconsistent state. */ if (islog) - (void) rw_wrlock(&ztest_shared->zs_name_lock); + (void) rw_enter(&ztest_shared->zs_name_lock, + RW_WRITER); VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); if (islog) - (void) rw_unlock(&ztest_shared->zs_name_lock); + (void) rw_exit(&ztest_shared->zs_name_lock); } else { (void) vdev_online(spa, guid0, 0, NULL); } } if (maxfaults == 0) - return; + goto out; /* * We have at least single-fault tolerance, so inject data corruption. 
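/*
 * Illustrative sketch, not part of the patch above: the heap-allocation
 * pattern this change applies throughout ztest.  Because ztest threads are
 * now created with a reduced stack size (see note (7) in the file comment),
 * MAXPATHLEN- and MAXNAMELEN-sized buffers are moved off the stack and onto
 * the libumem heap, exactly as done for path0/pathrand in the hunk above.
 * The function name below is hypothetical; umem_alloc(), umem_free() and
 * UMEM_NOFAIL are the real libumem interfaces the patch uses.
 */
#include <sys/param.h>	/* MAXPATHLEN */
#include <stdio.h>	/* snprintf */
#include <umem.h>	/* umem_alloc, umem_free, UMEM_NOFAIL */

static void
example_make_vdev_path(const char *dir, const char *pool, unsigned long long n)
{
	char *path;

	/* was: char path[MAXPATHLEN]; -- too large for a small thread stack */
	path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	(void) snprintf(path, MAXPATHLEN, "%s/%s.%llu", dir, pool, n);

	/* ... use 'path' here (open the file, stat it, etc.) ... */

	/*
	 * umem_free() takes the original allocation size, which is why the
	 * patch always frees with the same MAXPATHLEN/MAXNAMELEN constant
	 * that was passed to umem_alloc().
	 */
	umem_free(path, MAXPATHLEN);
}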
@@ -4553,7 +4732,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) fd = open(pathrand, O_RDWR); if (fd == -1) /* we hit a gap in the device namespace */ - return; + goto out; fsize = lseek(fd, 0, SEEK_END); @@ -4565,18 +4744,18 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) if (offset >= fsize) continue; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); if (mirror_save != zs->zs_mirrors) { - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); (void) close(fd); - return; + goto out; } if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) fatal(1, "can't inject bad word at 0x%llx in %s", offset, pathrand); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); if (zopt_verbose >= 7) (void) printf("injected bad word into %s," @@ -4584,6 +4763,9 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) } (void) close(fd); +out: + umem_free(path0, MAXPATHLEN); + umem_free(pathrand, MAXPATHLEN); } /* @@ -4595,7 +4777,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) ztest_shared_t *zs = ztest_shared; spa_t *spa = zs->zs_spa; objset_t *os = zd->zd_os; - ztest_od_t od[1]; + ztest_od_t *od; uint64_t object, blocksize, txg, pattern, psize; enum zio_checksum checksum = spa_dedup_checksum(spa); dmu_buf_t *db; @@ -4608,28 +4790,32 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) blocksize = ztest_random_blocksize(); blocksize = MIN(blocksize, 2048); /* because we write so many */ - ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); + od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL); + ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0); - if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0) + if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) { + umem_free(od, sizeof(ztest_od_t)); return; + } /* * Take the name lock as writer to prevent anyone else from changing * the pool and dataset properies we need to maintain during this test. */ - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, B_FALSE) != 0 || ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, B_FALSE) != 0) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); + umem_free(od, sizeof(ztest_od_t)); return; } object = od[0].od_object; blocksize = od[0].od_blocksize; - pattern = spa_guid(spa) ^ dmu_objset_fsid_guid(os); + pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); ASSERT(object != 0); @@ -4637,7 +4823,8 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_write(tx, object, 0, copies * blocksize); txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); if (txg == 0) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); + umem_free(od, sizeof(ztest_od_t)); return; } @@ -4681,7 +4868,8 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) zio_buf_free(buf, psize); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); + umem_free(od, sizeof(ztest_od_t)); } /* @@ -4700,6 +4888,31 @@ ztest_scrub(ztest_ds_t *zd, uint64_t id) } /* + * Change the guid for the pool. 
+ */ +/* ARGSUSED */ +void +ztest_reguid(ztest_ds_t *zd, uint64_t id) +{ + ztest_shared_t *zs = ztest_shared; + spa_t *spa = zs->zs_spa; + uint64_t orig, load; + + orig = spa_guid(spa); + load = spa_load_guid(spa); + if (spa_change_guid(spa) != 0) + return; + + if (zopt_verbose >= 3) { + (void) printf("Changed guid old %llu -> %llu\n", + (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); + } + + VERIFY3U(orig, !=, spa_guid(spa)); + VERIFY3U(load, ==, spa_load_guid(spa)); +} + +/* * Rename the pool to a different name and then rename it back. */ /* ARGSUSED */ @@ -4710,7 +4923,7 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id) char *oldname, *newname; spa_t *spa; - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); oldname = zs->zs_pool; newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); @@ -4750,7 +4963,7 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id) umem_free(newname, strlen(newname) + 1); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -4760,52 +4973,56 @@ static void ztest_run_zdb(char *pool) { int status; - char zdb[MAXPATHLEN + MAXNAMELEN + 20]; - char zbuf[1024]; char *bin; - char *ztest; - char *isa; - int isalen; + char *zdb; + char *zbuf; FILE *fp; - (void) realpath(getexecname(), zdb); + bin = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL); + zdb = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL); + zbuf = umem_alloc(1024, UMEM_NOFAIL); - /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ - bin = strstr(zdb, "/usr/bin/"); - ztest = strstr(bin, "/ztest"); - isa = bin + 8; - isalen = ztest - isa; - isa = strdup(isa); - /* LINTED */ - (void) sprintf(bin, - "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s", - isalen, - isa, + VERIFY(realpath(getexecname(), bin) != NULL); + if (strncmp(bin, "/usr/sbin/ztest", 15) == 0) { + strcpy(bin, "/usr/sbin/zdb"); /* Installed */ + } else if (strncmp(bin, "/sbin/ztest", 11) == 0) { + strcpy(bin, "/sbin/zdb"); /* Installed */ + } else { + strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */ + strcat(bin, "/zdb/zdb"); + } + + (void) sprintf(zdb, + "%s -bcc%s%s -U %s %s", + bin, zopt_verbose >= 3 ? "s" : "", zopt_verbose >= 4 ? 
"v" : "", spa_config_path, pool); - free(isa); if (zopt_verbose >= 5) (void) printf("Executing %s\n", strstr(zdb, "zdb ")); fp = popen(zdb, "r"); - while (fgets(zbuf, sizeof (zbuf), fp) != NULL) + while (fgets(zbuf, 1024, fp) != NULL) if (zopt_verbose >= 3) (void) printf("%s", zbuf); status = pclose(fp); if (status == 0) - return; + goto out; ztest_dump_core = 0; if (WIFEXITED(status)) fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); else fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); +out: + umem_free(bin, MAXPATHLEN + MAXNAMELEN + 20); + umem_free(zdb, MAXPATHLEN + MAXNAMELEN + 20); + umem_free(zbuf, 1024); } static void @@ -4923,23 +5140,18 @@ ztest_resume_thread(void *arg) ztest_resume(spa); (void) poll(NULL, 0, 100); } - return (NULL); -} -static void * -ztest_deadman_thread(void *arg) -{ - ztest_shared_t *zs = arg; - int grace = 300; - hrtime_t delta; + thread_exit(); - delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace; - - (void) poll(NULL, 0, (int)(1000 * delta)); + return (NULL); +} - fatal(0, "failed to complete within %d seconds of deadline", grace); +#define GRACE 300 - return (NULL); +static void +ztest_deadman_alarm(int sig) +{ + fatal(0, "failed to complete within %d seconds of deadline", GRACE); } static void @@ -5000,6 +5212,8 @@ ztest_thread(void *arg) ztest_execute(zi, id); } + thread_exit(); + return (NULL); } @@ -5065,18 +5279,18 @@ ztest_dataset_open(ztest_shared_t *zs, int d) ztest_dataset_name(name, zs->zs_pool, d); - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); error = ztest_dataset_create(name); if (error == ENOSPC) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); ztest_record_enospc(FTAG); return (error); } ASSERT(error == 0 || error == EEXIST); VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); ztest_zd_init(zd, os); @@ -5127,9 +5341,11 @@ ztest_dataset_close(ztest_shared_t *zs, int d) static void ztest_run(ztest_shared_t *zs) { - thread_t *tid; + kt_did_t *tid; spa_t *spa; - thread_t resume_tid; + objset_t *os; + kthread_t *resume_thread; + uint64_t object; int error; int t, d; @@ -5138,8 +5354,8 @@ ztest_run(ztest_shared_t *zs) /* * Initialize parent/child shared state. */ - VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0); - VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0); + mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL); + rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL); zs->zs_thread_start = gethrtime(); zs->zs_thread_stop = zs->zs_thread_start + zopt_passtime * NANOSEC; @@ -5148,7 +5364,7 @@ ztest_run(ztest_shared_t *zs) if (ztest_random(100) < zopt_killrate) zs->zs_thread_kill -= ztest_random(zopt_passtime * NANOSEC); - (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); + mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL); list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), offsetof(ztest_cb_data_t, zcd_node)); @@ -5158,8 +5374,13 @@ ztest_run(ztest_shared_t *zs) */ kernel_init(FREAD | FWRITE); VERIFY(spa_open(zs->zs_pool, &spa, FTAG) == 0); + spa->spa_debug = B_TRUE; zs->zs_spa = spa; + VERIFY3U(0, ==, dmu_objset_hold(zs->zs_pool, FTAG, &os)); + zs->zs_guid = dmu_objset_fsid_guid(os); + dmu_objset_rele(os, FTAG); + spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; /* @@ -5175,14 +5396,15 @@ ztest_run(ztest_shared_t *zs) /* * Create a thread to periodically resume suspended I/O. 
*/ - VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, - &resume_tid) == 0); + VERIFY3P((resume_thread = zk_thread_create(NULL, 0, + (thread_func_t)ztest_resume_thread, spa, TS_RUN, NULL, 0, 0, + PTHREAD_CREATE_JOINABLE)), !=, NULL); /* - * Create a deadman thread to abort() if we hang. + * Set a deadman alarm to abort() if we hang. */ - VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, - NULL) == 0); + signal(SIGALRM, ztest_deadman_alarm); + alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE); /* * Verify that we can safely inquire about about any object, @@ -5208,7 +5430,7 @@ ztest_run(ztest_shared_t *zs) } zs->zs_enospc_count = 0; - tid = umem_zalloc(zopt_threads * sizeof (thread_t), UMEM_NOFAIL); + tid = umem_zalloc(zopt_threads * sizeof (kt_did_t), UMEM_NOFAIL); if (zopt_verbose >= 4) (void) printf("starting main threads...\n"); @@ -5217,10 +5439,16 @@ ztest_run(ztest_shared_t *zs) * Kick off all the tests that run in parallel. */ for (t = 0; t < zopt_threads; t++) { + kthread_t *thread; + if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0) return; - VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, - THR_BOUND, &tid[t]) == 0); + + VERIFY3P(thread = zk_thread_create(NULL, 0, + (thread_func_t)ztest_thread, + (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0, + PTHREAD_CREATE_JOINABLE), !=, NULL); + tid[t] = thread->t_tid; } /* @@ -5228,7 +5456,7 @@ ztest_run(ztest_shared_t *zs) * so we don't close datasets while threads are still using them. */ for (t = zopt_threads - 1; t >= 0; t--) { - VERIFY(thr_join(tid[t], NULL, NULL) == 0); + thread_join(tid[t]); if (t < zopt_datasets) ztest_dataset_close(zs, t); } @@ -5238,20 +5466,24 @@ ztest_run(ztest_shared_t *zs) zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); - umem_free(tid, zopt_threads * sizeof (thread_t)); + umem_free(tid, zopt_threads * sizeof (kt_did_t)); /* Kill the resume thread */ ztest_exiting = B_TRUE; - VERIFY(thr_join(resume_tid, NULL, NULL) == 0); + thread_join(resume_thread->t_tid); ztest_resume(spa); /* * Right before closing the pool, kick off a bunch of async I/O; * spa_close() should wait for it to complete. 
*/ - for (uint64_t object = 1; object < 50; object++) + for (object = 1; object < 50; object++) dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); + /* Verify that at least one commit cb was called in a timely fashion */ + if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG) + VERIFY3U(zc_min_txg_delay, ==, 0); + spa_close(spa, FTAG); /* @@ -5277,11 +5509,9 @@ ztest_run(ztest_shared_t *zs) kernel_fini(); list_destroy(&zcl.zcl_callbacks); - - (void) _mutex_destroy(&zcl.zcl_callbacks_lock); - - (void) rwlock_destroy(&zs->zs_name_lock); - (void) _mutex_destroy(&zs->zs_vdev_lock); + mutex_destroy(&zcl.zcl_callbacks_lock); + rw_destroy(&zs->zs_name_lock); + mutex_destroy(&zs->zs_vdev_lock); } static void @@ -5379,7 +5609,7 @@ print_time(hrtime_t t, char *timebuf) } static nvlist_t * -make_random_props() +make_random_props(void) { nvlist_t *props; @@ -5405,8 +5635,8 @@ ztest_init(ztest_shared_t *zs) spa_t *spa; nvlist_t *nvroot, *props; - VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0); - VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0); + mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL); + rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL); kernel_init(FREAD | FWRITE); @@ -5435,8 +5665,8 @@ ztest_init(ztest_shared_t *zs) ztest_run_zdb(zs->zs_pool); - (void) rwlock_destroy(&zs->zs_name_lock); - (void) _mutex_destroy(&zs->zs_vdev_lock); + (void) rw_destroy(&zs->zs_name_lock); + (void) mutex_destroy(&zs->zs_vdev_lock); } int @@ -5456,10 +5686,12 @@ main(int argc, char **argv) ztest_random_fd = open("/dev/urandom", O_RDONLY); + dprintf_setup(&argc, argv); process_options(argc, argv); /* Override location of zpool.cache */ - (void) asprintf((char **)&spa_config_path, "%s/zpool.cache", zopt_dir); + VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache", + zopt_dir) != -1); /* * Blow away any existing copy of zpool.cache