X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=cmd%2Fztest%2Fztest.c;h=930342d2e421472d93c45d68efa60f2bc9b55143;hb=47050a88ac1445f3557030d0d7e61b8a397a5476;hp=eed92ec72ebb5546bf75d1f3d2a21c036067b68b;hpb=428870ff734fdaccc342b33fc53cf94724409a46;p=zfs.git diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index eed92ec..930342d 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -57,6 +57,9 @@ * the transaction group number is less than the current, open txg. * If you add a new test, please do this if applicable. * + * (7) Threads are created with a reduced stack size, for sanity checking. + * Therefore, it's important not to allocate huge buffers on the stack. + * * When run with no arguments, ztest runs for about five minutes and * produces no output if successful. To get a little bit of information, * specify -V. To get more information, specify -VV, and so on. @@ -168,8 +171,8 @@ typedef enum { typedef struct rll { void *rll_writer; int rll_readers; - mutex_t rll_lock; - cond_t rll_cv; + kmutex_t rll_lock; + kcondvar_t rll_cv; } rll_t; typedef struct rl { @@ -206,7 +209,7 @@ typedef struct ztest_ds { uint64_t zd_seq; ztest_od_t *zd_od; /* debugging aid */ char zd_name[MAXNAMELEN]; - mutex_t zd_dirobj_lock; + kmutex_t zd_dirobj_lock; rll_t zd_object_lock[ZTEST_OBJECT_LOCKS]; rll_t zd_range_lock[ZTEST_RANGE_LOCKS]; } ztest_ds_t; @@ -299,8 +302,8 @@ ztest_info_t ztest_info[] = { * The callbacks are ordered by txg number. */ typedef struct ztest_cb_list { - mutex_t zcl_callbacks_lock; - list_t zcl_callbacks; + kmutex_t zcl_callbacks_lock; + list_t zcl_callbacks; } ztest_cb_list_t; /* @@ -319,8 +322,8 @@ typedef struct ztest_shared { uint64_t zs_vdev_aux; uint64_t zs_alloc; uint64_t zs_space; - mutex_t zs_vdev_lock; - rwlock_t zs_name_lock; + kmutex_t zs_vdev_lock; + krwlock_t zs_name_lock; ztest_info_t zs_info[ZTEST_FUNCS]; uint64_t zs_splits; uint64_t zs_mirrors; @@ -341,6 +344,22 @@ static boolean_t ztest_exiting; /* Global commit callback list */ static ztest_cb_list_t zcl; +/* Commit cb delay */ +static uint64_t zc_min_txg_delay = UINT64_MAX; +static int zc_cb_counter = 0; + +/* + * Minimum number of commit callbacks that need to be registered for us to check + * whether the minimum txg delay is acceptable. + */ +#define ZTEST_COMMIT_CB_MIN_REG 100 + +/* + * If a number of txgs equal to this threshold have been created after a commit + * callback has been registered but not called, then we assume there is an + * implementation bug. + */ +#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000) extern uint64_t metaslab_gang_bang; extern uint64_t metaslab_df_alloc_threshold; @@ -359,7 +378,7 @@ static void usage(boolean_t) __NORETURN; * debugging facilities. 
*/ const char * -_umem_debug_init() +_umem_debug_init(void) { return ("default,verbose"); /* $UMEM_DEBUG setting */ } @@ -876,8 +895,8 @@ ztest_rll_init(rll_t *rll) { rll->rll_writer = NULL; rll->rll_readers = 0; - VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); - VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); + mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL); } static void @@ -885,32 +904,32 @@ ztest_rll_destroy(rll_t *rll) { ASSERT(rll->rll_writer == NULL); ASSERT(rll->rll_readers == 0); - VERIFY(_mutex_destroy(&rll->rll_lock) == 0); - VERIFY(cond_destroy(&rll->rll_cv) == 0); + mutex_destroy(&rll->rll_lock); + cv_destroy(&rll->rll_cv); } static void ztest_rll_lock(rll_t *rll, rl_type_t type) { - VERIFY(mutex_lock(&rll->rll_lock) == 0); + mutex_enter(&rll->rll_lock); if (type == RL_READER) { while (rll->rll_writer != NULL) - (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + (void) cv_wait(&rll->rll_cv, &rll->rll_lock); rll->rll_readers++; } else { while (rll->rll_writer != NULL || rll->rll_readers) - (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + (void) cv_wait(&rll->rll_cv, &rll->rll_lock); rll->rll_writer = curthread; } - VERIFY(mutex_unlock(&rll->rll_lock) == 0); + mutex_exit(&rll->rll_lock); } static void ztest_rll_unlock(rll_t *rll) { - VERIFY(mutex_lock(&rll->rll_lock) == 0); + mutex_enter(&rll->rll_lock); if (rll->rll_writer) { ASSERT(rll->rll_readers == 0); @@ -922,9 +941,9 @@ ztest_rll_unlock(rll_t *rll) } if (rll->rll_writer == NULL && rll->rll_readers == 0) - VERIFY(cond_broadcast(&rll->rll_cv) == 0); + cv_broadcast(&rll->rll_cv); - VERIFY(mutex_unlock(&rll->rll_lock) == 0); + mutex_exit(&rll->rll_lock); } static void @@ -979,25 +998,28 @@ ztest_zd_init(ztest_ds_t *zd, objset_t *os) zd->zd_zilog = dmu_objset_zil(os); zd->zd_seq = 0; dmu_objset_name(os, zd->zd_name); + int l; - VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); + mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL); - for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) + for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) ztest_rll_init(&zd->zd_object_lock[l]); - for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) + for (l = 0; l < ZTEST_RANGE_LOCKS; l++) ztest_rll_init(&zd->zd_range_lock[l]); } static void ztest_zd_fini(ztest_ds_t *zd) { - VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); + int l; + + mutex_destroy(&zd->zd_dirobj_lock); - for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++) + for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) ztest_rll_destroy(&zd->zd_object_lock[l]); - for (int l = 0; l < ZTEST_RANGE_LOCKS; l++) + for (l = 0; l < ZTEST_RANGE_LOCKS; l++) ztest_rll_destroy(&zd->zd_range_lock[l]); } @@ -1039,6 +1061,7 @@ ztest_pattern_set(void *buf, uint64_t size, uint64_t value) *ip++ = value; } +#ifndef NDEBUG static boolean_t ztest_pattern_match(void *buf, uint64_t size, uint64_t value) { @@ -1051,6 +1074,7 @@ ztest_pattern_match(void *buf, uint64_t size, uint64_t value) return (diff == 0); } +#endif static void ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, @@ -1102,7 +1126,7 @@ ztest_bt_bonus(dmu_buf_t *db) #define lrz_bonustype lr_rdev #define lrz_bonuslen lr_crtime[1] -static uint64_t +static void ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) { char *name = (void *)(lr + 1); /* name follows lr */ @@ -1110,40 +1134,41 @@ ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) itx_t *itx; if (zil_replaying(zd->zd_zilog, tx)) - return (0); + return; itx = zil_itx_create(TX_CREATE, 
sizeof (*lr) + namesize); bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, sizeof (*lr) + namesize - sizeof (lr_t)); - return (zil_itx_assign(zd->zd_zilog, itx, tx)); + zil_itx_assign(zd->zd_zilog, itx, tx); } -static uint64_t -ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr) +static void +ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object) { char *name = (void *)(lr + 1); /* name follows lr */ size_t namesize = strlen(name) + 1; itx_t *itx; if (zil_replaying(zd->zd_zilog, tx)) - return (0); + return; itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, sizeof (*lr) + namesize - sizeof (lr_t)); - return (zil_itx_assign(zd->zd_zilog, itx, tx)); + itx->itx_oid = object; + zil_itx_assign(zd->zd_zilog, itx, tx); } -static uint64_t +static void ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) { itx_t *itx; itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); if (zil_replaying(zd->zd_zilog, tx)) - return (0); + return; if (lr->lr_length > ZIL_MAX_LOG_DATA) write_state = WR_INDIRECT; @@ -1166,37 +1191,39 @@ ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, sizeof (*lr) - sizeof (lr_t)); - return (zil_itx_assign(zd->zd_zilog, itx, tx)); + zil_itx_assign(zd->zd_zilog, itx, tx); } -static uint64_t +static void ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) { itx_t *itx; if (zil_replaying(zd->zd_zilog, tx)) - return (0); + return; itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, sizeof (*lr) - sizeof (lr_t)); - return (zil_itx_assign(zd->zd_zilog, itx, tx)); + itx->itx_sync = B_FALSE; + zil_itx_assign(zd->zd_zilog, itx, tx); } -static uint64_t +static void ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) { itx_t *itx; if (zil_replaying(zd->zd_zilog, tx)) - return (0); + return; itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, sizeof (*lr) - sizeof (lr_t)); - return (zil_itx_assign(zd->zd_zilog, itx, tx)); + itx->itx_sync = B_FALSE; + zil_itx_assign(zd->zd_zilog, itx, tx); } /* @@ -1328,7 +1355,7 @@ ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap) VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); - (void) ztest_log_remove(zd, tx, lr); + (void) ztest_log_remove(zd, tx, lr, object); dmu_tx_commit(tx); @@ -1573,26 +1600,26 @@ ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) } zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { - NULL, /* 0 no such transaction type */ - ztest_replay_create, /* TX_CREATE */ - NULL, /* TX_MKDIR */ - NULL, /* TX_MKXATTR */ - NULL, /* TX_SYMLINK */ - ztest_replay_remove, /* TX_REMOVE */ - NULL, /* TX_RMDIR */ - NULL, /* TX_LINK */ - NULL, /* TX_RENAME */ - ztest_replay_write, /* TX_WRITE */ - ztest_replay_truncate, /* TX_TRUNCATE */ - ztest_replay_setattr, /* TX_SETATTR */ - NULL, /* TX_ACL */ - NULL, /* TX_CREATE_ACL */ - NULL, /* TX_CREATE_ATTR */ - NULL, /* TX_CREATE_ACL_ATTR */ - NULL, /* TX_MKDIR_ACL */ - NULL, /* TX_MKDIR_ATTR */ - NULL, /* TX_MKDIR_ACL_ATTR */ - NULL, /* TX_WRITE2 */ + NULL, /* 0 no such transaction type */ + (zil_replay_func_t *)ztest_replay_create, /* TX_CREATE */ + NULL, /* TX_MKDIR */ + NULL, /* TX_MKXATTR */ + NULL, /* TX_SYMLINK */ + (zil_replay_func_t *)ztest_replay_remove, /* TX_REMOVE */ + NULL, /* TX_RMDIR */ + NULL, /* TX_LINK */ + NULL, /* TX_RENAME */ + (zil_replay_func_t *)ztest_replay_write, /* 
TX_WRITE */ + (zil_replay_func_t *)ztest_replay_truncate, /* TX_TRUNCATE */ + (zil_replay_func_t *)ztest_replay_setattr, /* TX_SETATTR */ + NULL, /* TX_ACL */ + NULL, /* TX_CREATE_ACL */ + NULL, /* TX_CREATE_ATTR */ + NULL, /* TX_CREATE_ACL_ATTR */ + NULL, /* TX_MKDIR_ACL */ + NULL, /* TX_MKDIR_ATTR */ + NULL, /* TX_MKDIR_ACL_ATTR */ + NULL, /* TX_WRITE2 */ }; /* @@ -1728,10 +1755,11 @@ ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) { int missing = 0; int error; + int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); - for (int i = 0; i < count; i++, od++) { + for (i = 0; i < count; i++, od++) { od->od_object = 0; error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, sizeof (uint64_t), 1, &od->od_object); @@ -1768,10 +1796,11 @@ static int ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) { int missing = 0; + int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); - for (int i = 0; i < count; i++, od++) { + for (i = 0; i < count; i++, od++) { if (missing) { od->od_object = 0; missing++; @@ -1813,12 +1842,13 @@ ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) { int missing = 0; int error; + int i; - ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + ASSERT(mutex_held(&zd->zd_dirobj_lock)); od += count - 1; - for (int i = count - 1; i >= 0; i--, od--) { + for (i = count - 1; i >= 0; i--, od--) { if (missing) { missing++; continue; @@ -1989,6 +2019,8 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) case ZTEST_IO_SETATTR: (void) ztest_setattr(zd, object); break; + default: + break; } umem_free(data, blocksize); @@ -2013,7 +2045,7 @@ ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, od->od_gen = 0; (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", - tag, (int64_t)id, index); + tag, (longlong_t)id, (u_longlong_t)index); } /* @@ -2028,13 +2060,13 @@ ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) int count = size / sizeof (*od); int rv = 0; - VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); + mutex_enter(&zd->zd_dirobj_lock); if ((ztest_lookup(zd, od, count) != 0 || remove) && (ztest_remove(zd, od, count) != 0 || ztest_create(zd, od, count) != 0)) rv = -1; zd->zd_od = od; - VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); + mutex_exit(&zd->zd_dirobj_lock); return (rv); } @@ -2045,7 +2077,7 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id) { zilog_t *zilog = zd->zd_zilog; - zil_commit(zilog, UINT64_MAX, ztest_random(ZTEST_OBJECTS)); + zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); /* * Remember the committed values in zd, which is in parent/child @@ -2090,7 +2122,7 @@ ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) * Attempt to create an existing pool. It shouldn't matter * what's in the nvroot; we should fail with EEXIST. 
*/ - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL)); nvlist_free(nvroot); @@ -2098,18 +2130,19 @@ ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool)); spa_close(spa, FTAG); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } static vdev_t * vdev_lookup_by_path(vdev_t *vd, const char *path) { vdev_t *mvd; + int c; if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) return (vd); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != NULL) return (mvd); @@ -2151,7 +2184,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) nvlist_t *nvroot; int error; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2177,9 +2210,9 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) * dmu_objset_destroy() to fail with EBUSY thus * leaving the dataset in an inconsistent state. */ - VERIFY(rw_wrlock(&ztest_shared->zs_name_lock) == 0); + rw_enter(&ztest_shared->zs_name_lock, RW_WRITER); error = spa_vdev_remove(spa, guid, B_FALSE); - VERIFY(rw_unlock(&ztest_shared->zs_name_lock) == 0); + rw_exit(&ztest_shared->zs_name_lock); if (error && error != EEXIST) fatal(0, "spa_vdev_remove() = %d", error); @@ -2201,7 +2234,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) fatal(0, "spa_vdev_add() = %d", error); } - VERIFY(mutex_unlock(&ztest_shared->zs_vdev_lock) == 0); + mutex_exit(&ztest_shared->zs_vdev_lock); } /* @@ -2227,7 +2260,7 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) aux = ZPOOL_CONFIG_L2CACHE; } - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2283,7 +2316,7 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); } - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } /* @@ -2300,11 +2333,11 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) uint_t c, children, schildren = 0, lastlogid = 0; int error = 0; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); /* ensure we have a useable config; mirrors of raidz aren't supported */ if (zs->zs_mirrors < 3 || zopt_raidz > 1) { - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2363,9 +2396,9 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) spa_config_exit(spa, SCL_VDEV, FTAG); - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); nvlist_free(config); @@ -2378,7 +2411,7 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id) ++zs->zs_splits; --zs->zs_mirrors; } - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } @@ -2407,7 +2440,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) int oldvd_is_log; int error, expected_error; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); @@ -2468,7 +2501,7 
@@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) if (error != 0 && error != ENODEV && error != EBUSY && error != ENOTSUP) fatal(0, "detach (%s) returned %d", oldpath, error); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2561,7 +2594,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) (longlong_t)newsize, replacing, error, expected_error); } - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } /* @@ -2570,7 +2603,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) vdev_t * grow_vdev(vdev_t *vd, void *arg) { - spa_t *spa = vd->vdev_spa; + ASSERTV(spa_t *spa = vd->vdev_spa); size_t *newsize = arg; size_t fsize; int fd; @@ -2582,7 +2615,7 @@ grow_vdev(vdev_t *vd, void *arg) return (vd); fsize = lseek(fd, 0, SEEK_END); - (void) ftruncate(fd, *newsize); + VERIFY(ftruncate(fd, *newsize) == 0); if (zopt_verbose >= 6) { (void) printf("%s grew from %lu to %lu bytes\n", @@ -2659,6 +2692,8 @@ online_vdev(vdev_t *vd, void *arg) vdev_t * vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) { + uint_t c; + if (vd->vdev_ops->vdev_op_leaf) { if (func == NULL) return (vd); @@ -2666,7 +2701,7 @@ vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) return (func(vd, arg)); } - for (uint_t c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) return (cvd); @@ -2690,7 +2725,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) uint64_t top; uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); spa_config_enter(spa, SCL_STATE, spa, RW_READER); top = ztest_random_vdev_top(spa, B_TRUE); @@ -2718,7 +2753,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) if (tvd->vdev_state != VDEV_STATE_HEALTHY || psize == 0 || psize >= 4 * zopt_vdev_size) { spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } ASSERT(psize > 0); @@ -2743,7 +2778,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) "the vdev configuration changed.\n"); } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2777,7 +2812,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) "intervening vdev offline or remove.\n"); } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); return; } @@ -2805,7 +2840,7 @@ ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) } spa_config_exit(spa, SCL_STATE, spa); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); } /* @@ -2875,7 +2910,7 @@ ztest_snapshot_create(char *osname, uint64_t id) (u_longlong_t)id); error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1, - NULL, B_FALSE); + NULL, NULL, B_FALSE, B_FALSE, -1); if (error == ENOSPC) { ztest_record_enospc(FTAG); return (B_FALSE); @@ -2911,8 +2946,9 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) objset_t *os, *os2; char name[MAXNAMELEN]; zilog_t *zilog; + int i; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu", zs->zs_pool, (u_longlong_t)id); @@ -2950,7 +2986,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) if (error) { if (error == ENOSPC) { 
ztest_record_enospc(FTAG); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); return; } fatal(0, "dmu_objset_create(%s) = %d", name, error); @@ -2971,7 +3007,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) * and randomly take a couple of snapshots along the way. */ iters = ztest_random(5); - for (int i = 0; i < iters; i++) { + for (i = 0; i < iters; i++) { ztest_dmu_object_alloc_free(&zdtmp, id); if (ztest_random(iters) == 0) (void) ztest_snapshot_create(name, i); @@ -2999,7 +3035,7 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) dmu_objset_disown(os, FTAG); ztest_zd_fini(&zdtmp); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -3010,10 +3046,10 @@ ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) { ztest_shared_t *zs = ztest_shared; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) ztest_snapshot_destroy(zd->zd_name, id); (void) ztest_snapshot_create(zd->zd_name, id); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -3029,11 +3065,16 @@ ztest_dsl_dataset_cleanup(char *osname, uint64_t id) char snap3name[MAXNAMELEN]; int error; - (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); - (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); - (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); - (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); - (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); + (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", + osname, (u_longlong_t)id); + (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", + osname, (u_longlong_t)id); + (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", + clone1name, (u_longlong_t)id); + (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", + osname, (u_longlong_t)id); + (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", + clone1name, (u_longlong_t)id); error = dmu_objset_destroy(clone2name, B_FALSE); if (error && error != ENOENT) @@ -3069,18 +3110,23 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) char *osname = zd->zd_name; int error; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); ztest_dsl_dataset_cleanup(osname, id); - (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); - (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); - (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); - (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); - (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); + (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", + osname, (u_longlong_t)id); + (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", + osname, (u_longlong_t)id); + (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", + clone1name, (u_longlong_t)id); + (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", + osname, (u_longlong_t)id); + (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", + clone1name, (u_longlong_t)id); error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1, - NULL, B_FALSE); + NULL, NULL, B_FALSE, B_FALSE, -1); if (error && error != EEXIST) { if (error == ENOSPC) { ztest_record_enospc(FTAG); @@ -3104,7 +3150,7 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) } error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1, - NULL, B_FALSE); + NULL, NULL, B_FALSE, B_FALSE, -1); if (error && error != EEXIST) 
{ if (error == ENOSPC) { ztest_record_enospc(FTAG); @@ -3114,7 +3160,7 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) } error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1, - NULL, B_FALSE); + NULL, NULL, B_FALSE, B_FALSE, -1); if (error && error != EEXIST) { if (error == ENOSPC) { ztest_record_enospc(FTAG); @@ -3149,7 +3195,7 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) out: ztest_dsl_dataset_cleanup(osname, id); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -3160,8 +3206,9 @@ ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) { ztest_od_t od[4]; int batchsize = sizeof (od) / sizeof (od[0]); + int b; - for (int b = 0; b < batchsize; b++) + for (b = 0; b < batchsize; b++) ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0); /* @@ -3877,6 +3924,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id) objset_t *os = zd->zd_os; ztest_od_t od[1]; uint64_t object, txg; + int i; ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0); @@ -3890,14 +3938,14 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id) * and gets upgraded to a fatzap. Also, since we are adding * 2050 entries we should see ptrtbl growth and leaf-block split. */ - for (int i = 0; i < 2050; i++) { + for (i = 0; i < 2050; i++) { char name[MAXNAMELEN]; uint64_t value = i; dmu_tx_t *tx; int error; (void) snprintf(name, sizeof (name), "fzap-%llu-%llu", - id, value); + (u_longlong_t)id, (u_longlong_t)value); tx = dmu_tx_create(os); dmu_tx_hold_zap(tx, object, B_TRUE, name); @@ -4063,18 +4111,20 @@ ztest_commit_callback(void *arg, int error) return; } - /* Was this callback added to the global callback list? */ - if (!data->zcd_added) - goto out; - + ASSERT(data->zcd_added); ASSERT3U(data->zcd_txg, !=, 0); + (void) mutex_enter(&zcl.zcl_callbacks_lock); + + /* See if this cb was called more quickly */ + if ((synced_txg - data->zcd_txg) < zc_min_txg_delay) + zc_min_txg_delay = synced_txg - data->zcd_txg; + /* Remove our callback from the list */ - (void) mutex_lock(&zcl.zcl_callbacks_lock); list_remove(&zcl.zcl_callbacks, data); - (void) mutex_unlock(&zcl.zcl_callbacks_lock); -out: + (void) mutex_exit(&zcl.zcl_callbacks_lock); + umem_free(data, sizeof (ztest_cb_data_t)); } @@ -4088,18 +4138,12 @@ ztest_create_cb_data(objset_t *os, uint64_t txg) cb_data->zcd_txg = txg; cb_data->zcd_spa = dmu_objset_spa(os); + list_link_init(&cb_data->zcd_node); return (cb_data); } /* - * If a number of txgs equal to this threshold have been created after a commit - * callback has been registered but not called, then we assume there is an - * implementation bug. - */ -#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2) - -/* * Commit callback test. 
*/ void @@ -4110,7 +4154,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) dmu_tx_t *tx; ztest_cb_data_t *cb_data[3], *tmp_cb; uint64_t old_txg, txg; - int i, error; + int i, error = 0; ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0); @@ -4174,7 +4218,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx); - (void) mutex_lock(&zcl.zcl_callbacks_lock); + (void) mutex_enter(&zcl.zcl_callbacks_lock); /* * Since commit callbacks don't have any ordering requirement and since @@ -4190,7 +4234,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) */ tmp_cb = list_head(&zcl.zcl_callbacks); if (tmp_cb != NULL && - tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) { + tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) { fatal(0, "Commit callback threshold exceeded, oldest txg: %" PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg); } @@ -4221,7 +4265,9 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) tmp_cb = cb_data[i]; } - (void) mutex_unlock(&zcl.zcl_callbacks_lock); + zc_cb_counter += 3; + + (void) mutex_exit(&zcl.zcl_callbacks_lock); dmu_tx_commit(tx); } @@ -4237,14 +4283,15 @@ ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) ZFS_PROP_DEDUP }; ztest_shared_t *zs = ztest_shared; + int p; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); - for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) + for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* ARGSUSED */ @@ -4254,7 +4301,7 @@ ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) ztest_shared_t *zs = ztest_shared; nvlist_t *props = NULL; - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); (void) ztest_spa_prop_set_uint64(zs, ZPOOL_PROP_DEDUPDITTO, ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN)); @@ -4266,7 +4313,7 @@ ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) nvlist_free(props); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -4284,14 +4331,14 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) char tag[100]; char osname[MAXNAMELEN]; - (void) rw_rdlock(&ztest_shared->zs_name_lock); + (void) rw_enter(&ztest_shared->zs_name_lock, RW_READER); dmu_objset_name(os, osname); - (void) snprintf(snapname, 100, "sh1_%llu", id); + (void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id); (void) snprintf(fullname, 100, "%s@%s", osname, snapname); - (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id); - (void) snprintf(tag, 100, "%tag_%llu", id); + (void) snprintf(clonename, 100, "%s/ch1_%llu",osname,(u_longlong_t)id); + (void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id); /* * Clean up from any previous run. @@ -4304,7 +4351,8 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) * Create snapshot, clone it, mark snap for deferred destroy, * destroy clone, verify snap was also destroyed. 
*/ - error = dmu_objset_snapshot(osname, snapname, NULL, FALSE); + error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE, + FALSE, -1); if (error) { if (error == ENOSPC) { ztest_record_enospc("dmu_objset_snapshot"); @@ -4346,7 +4394,8 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) * destroy a held snapshot, mark for deferred destroy, * release hold, verify snapshot was destroyed. */ - error = dmu_objset_snapshot(osname, snapname, NULL, FALSE); + error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE, + FALSE, -1); if (error) { if (error == ENOSPC) { ztest_record_enospc("dmu_objset_snapshot"); @@ -4355,7 +4404,8 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error); } - error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE, B_TRUE); + error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE, + B_TRUE, -1); if (error) fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag); @@ -4378,7 +4428,7 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT); out: - (void) rw_unlock(&ztest_shared->zs_name_lock); + (void) rw_exit(&ztest_shared->zs_name_lock); } /* @@ -4393,7 +4443,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) int fd; uint64_t offset; uint64_t leaves; - uint64_t bad = 0x1990c0ffeedecade; + uint64_t bad = 0x1990c0ffeedecadeull; uint64_t top, leaf; char path0[MAXPATHLEN]; char pathrand[MAXPATHLEN]; @@ -4406,11 +4456,11 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) uint64_t guid0 = 0; boolean_t islog = B_FALSE; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); maxfaults = MAXFAULTS(); leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz; mirror_save = zs->zs_mirrors; - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); ASSERT(leaves >= 1); @@ -4504,12 +4554,13 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) * leaving the dataset in an inconsistent state. */ if (islog) - (void) rw_wrlock(&ztest_shared->zs_name_lock); + (void) rw_enter(&ztest_shared->zs_name_lock, + RW_WRITER); VERIFY(vdev_offline(spa, guid0, flags) != EBUSY); if (islog) - (void) rw_unlock(&ztest_shared->zs_name_lock); + (void) rw_exit(&ztest_shared->zs_name_lock); } else { (void) vdev_online(spa, guid0, 0, NULL); } @@ -4536,9 +4587,9 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) if (offset >= fsize) continue; - VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + mutex_enter(&zs->zs_vdev_lock); if (mirror_save != zs->zs_mirrors) { - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); (void) close(fd); return; } @@ -4547,7 +4598,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) fatal(1, "can't inject bad word at 0x%llx in %s", offset, pathrand); - VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0); + mutex_exit(&zs->zs_vdev_lock); if (zopt_verbose >= 7) (void) printf("injected bad word into %s," @@ -4574,6 +4625,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) void *buf; blkptr_t blk; int copies = 2 * ZIO_DEDUPDITTO_MIN; + int i; blocksize = ztest_random_blocksize(); blocksize = MIN(blocksize, 2048); /* because we write so many */ @@ -4587,13 +4639,13 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) * Take the name lock as writer to prevent anyone else from changing * the pool and dataset properies we need to maintain during this test. 
*/ - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum, B_FALSE) != 0 || ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1, B_FALSE) != 0) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); return; } @@ -4607,14 +4659,14 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) dmu_tx_hold_write(tx, object, 0, copies * blocksize); txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); if (txg == 0) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); return; } /* * Write all the copies of our block. */ - for (int i = 0; i < copies; i++) { + for (i = 0; i < copies; i++) { uint64_t offset = i * blocksize; VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db, DMU_READ_NO_PREFETCH) == 0); @@ -4651,7 +4703,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) zio_buf_free(buf, psize); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -4680,7 +4732,7 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id) char *oldname, *newname; spa_t *spa; - (void) rw_wrlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_WRITER); oldname = zs->zs_pool; newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); @@ -4720,7 +4772,7 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id) umem_free(newname, strlen(newname) + 1); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); } /* @@ -4843,19 +4895,19 @@ ztest_spa_import_export(char *oldname, char *newname) /* * Import it under the new name. */ - VERIFY3U(0, ==, spa_import(newname, config, NULL)); + VERIFY3U(0, ==, spa_import(newname, config, NULL, 0)); ztest_walk_pool_directory("pools after import"); /* * Try to import it again -- should fail with EEXIST. */ - VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL)); + VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); /* * Try to import it under a different name -- should fail with EEXIST. */ - VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL)); + VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); /* * Verify that the pool is no longer visible under the old name. 
@@ -4893,23 +4945,18 @@ ztest_resume_thread(void *arg) ztest_resume(spa); (void) poll(NULL, 0, 100); } - return (NULL); -} - -static void * -ztest_deadman_thread(void *arg) -{ - ztest_shared_t *zs = arg; - int grace = 300; - hrtime_t delta; - delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace; + thread_exit(); - (void) poll(NULL, 0, (int)(1000 * delta)); + return (NULL); +} - fatal(0, "failed to complete within %d seconds of deadline", grace); +#define GRACE 300 - return (NULL); +static void +ztest_deadman_alarm(int sig) +{ + fatal(0, "failed to complete within %d seconds of deadline", GRACE); } static void @@ -4918,8 +4965,9 @@ ztest_execute(ztest_info_t *zi, uint64_t id) ztest_shared_t *zs = ztest_shared; ztest_ds_t *zd = &zs->zs_zd[id % zopt_datasets]; hrtime_t functime = gethrtime(); + int i; - for (int i = 0; i < zi->zi_iters; i++) + for (i = 0; i < zi->zi_iters; i++) zi->zi_func(zd, id); functime = gethrtime() - functime; @@ -4969,6 +5017,8 @@ ztest_thread(void *arg) ztest_execute(zi, id); } + thread_exit(); + return (NULL); } @@ -4982,6 +5032,7 @@ static void ztest_dataset_destroy(ztest_shared_t *zs, int d) { char name[MAXNAMELEN]; + int t; ztest_dataset_name(name, zs->zs_pool, d); @@ -4993,7 +5044,7 @@ ztest_dataset_destroy(ztest_shared_t *zs, int d) * ztest thread t operates on dataset (t % zopt_datasets), * so there may be more than one thing to clean up. */ - for (int t = d; t < zopt_threads; t += zopt_datasets) + for (t = d; t < zopt_threads; t += zopt_datasets) ztest_dsl_dataset_cleanup(name, t); (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, @@ -5033,18 +5084,18 @@ ztest_dataset_open(ztest_shared_t *zs, int d) ztest_dataset_name(name, zs->zs_pool, d); - (void) rw_rdlock(&zs->zs_name_lock); + (void) rw_enter(&zs->zs_name_lock, RW_READER); error = ztest_dataset_create(name); if (error == ENOSPC) { - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); ztest_record_enospc(FTAG); return (error); } ASSERT(error == 0 || error == EEXIST); VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0); - (void) rw_unlock(&zs->zs_name_lock); + (void) rw_exit(&zs->zs_name_lock); ztest_zd_init(zd, os); @@ -5095,18 +5146,20 @@ ztest_dataset_close(ztest_shared_t *zs, int d) static void ztest_run(ztest_shared_t *zs) { - thread_t *tid; + kt_did_t *tid; spa_t *spa; - thread_t resume_tid; + kthread_t *resume_thread; + uint64_t object; int error; + int t, d; ztest_exiting = B_FALSE; /* * Initialize parent/child shared state. */ - VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0); - VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0); + mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL); + rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL); zs->zs_thread_start = gethrtime(); zs->zs_thread_stop = zs->zs_thread_start + zopt_passtime * NANOSEC; @@ -5115,7 +5168,7 @@ ztest_run(ztest_shared_t *zs) if (ztest_random(100) < zopt_killrate) zs->zs_thread_kill -= ztest_random(zopt_passtime * NANOSEC); - (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL); + mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL); list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), offsetof(ztest_cb_data_t, zcd_node)); @@ -5142,14 +5195,14 @@ ztest_run(ztest_shared_t *zs) /* * Create a thread to periodically resume suspended I/O. 
*/ - VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND, - &resume_tid) == 0); + VERIFY3P((resume_thread = thread_create(NULL, 0, ztest_resume_thread, + spa, TS_RUN, NULL, 0, 0)), !=, NULL); /* - * Create a deadman thread to abort() if we hang. + * Set a deadman alarm to abort() if we hang. */ - VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND, - NULL) == 0); + signal(SIGALRM, ztest_deadman_alarm); + alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE); /* * Verify that we can safely inquire about about any object, @@ -5157,8 +5210,8 @@ ztest_run(ztest_shared_t *zs) * we probe a 5-wide window around each power of two. * This hits all edge cases, including zero and the max. */ - for (int t = 0; t < 64; t++) { - for (int d = -5; d <= 5; d++) { + for (t = 0; t < 64; t++) { + for (d = -5; d <= 5; d++) { error = dmu_object_info(spa->spa_meta_objset, (1ULL << t) + d, NULL); ASSERT(error == 0 || error == ENOENT || @@ -5175,7 +5228,7 @@ ztest_run(ztest_shared_t *zs) } zs->zs_enospc_count = 0; - tid = umem_zalloc(zopt_threads * sizeof (thread_t), UMEM_NOFAIL); + tid = umem_zalloc(zopt_threads * sizeof (kt_did_t), UMEM_NOFAIL); if (zopt_verbose >= 4) (void) printf("starting main threads...\n"); @@ -5183,19 +5236,23 @@ ztest_run(ztest_shared_t *zs) /* * Kick off all the tests that run in parallel. */ - for (int t = 0; t < zopt_threads; t++) { + for (t = 0; t < zopt_threads; t++) { + kthread_t *thread; + if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0) return; - VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t, - THR_BOUND, &tid[t]) == 0); + + VERIFY3P(thread = thread_create(NULL, 0, ztest_thread, + (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0), !=, NULL); + tid[t] = thread->t_tid; } /* * Wait for all of the tests to complete. We go in reverse order * so we don't close datasets while threads are still using them. */ - for (int t = zopt_threads - 1; t >= 0; t--) { - VERIFY(thr_join(tid[t], NULL, NULL) == 0); + for (t = zopt_threads - 1; t >= 0; t--) { + thread_join(tid[t]); if (t < zopt_datasets) ztest_dataset_close(zs, t); } @@ -5205,20 +5262,24 @@ ztest_run(ztest_shared_t *zs) zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); - umem_free(tid, zopt_threads * sizeof (thread_t)); + umem_free(tid, zopt_threads * sizeof (kt_did_t)); /* Kill the resume thread */ ztest_exiting = B_TRUE; - VERIFY(thr_join(resume_tid, NULL, NULL) == 0); + thread_join(resume_thread->t_tid); ztest_resume(spa); /* * Right before closing the pool, kick off a bunch of async I/O; * spa_close() should wait for it to complete. 
*/ - for (uint64_t object = 1; object < 50; object++) + for (object = 1; object < 50; object++) dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20); + /* Verify that at least one commit cb was called in a timely fashion */ + if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG) + VERIFY3U(zc_min_txg_delay, ==, 0); + spa_close(spa, FTAG); /* @@ -5242,6 +5303,13 @@ ztest_run(ztest_shared_t *zs) } kernel_fini(); + + list_destroy(&zcl.zcl_callbacks); + + (void) _mutex_destroy(&zcl.zcl_callbacks_lock); + + (void) rwlock_destroy(&zs->zs_name_lock); + (void) _mutex_destroy(&zs->zs_vdev_lock); } static void @@ -5265,7 +5333,7 @@ ztest_freeze(ztest_shared_t *zs) */ while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { ztest_dmu_object_alloc_free(zd, 0); - zil_commit(zd->zd_zilog, UINT64_MAX, 0); + zil_commit(zd->zd_zilog, 0); } txg_wait_synced(spa_get_dsl(spa), 0); @@ -5292,7 +5360,7 @@ ztest_freeze(ztest_shared_t *zs) /* * Commit all of the changes we just generated. */ - zil_commit(zd->zd_zilog, UINT64_MAX, 0); + zil_commit(zd->zd_zilog, 0); txg_wait_synced(spa_get_dsl(spa), 0); /* @@ -5311,13 +5379,6 @@ ztest_freeze(ztest_shared_t *zs) ztest_dataset_close(zs, 0); spa_close(spa, FTAG); kernel_fini(); - - list_destroy(&zcl.zcl_callbacks); - - (void) _mutex_destroy(&zcl.zcl_callbacks_lock); - - (void) rwlock_destroy(&zs->zs_name_lock); - (void) _mutex_destroy(&zs->zs_vdev_lock); } void @@ -5346,7 +5407,7 @@ print_time(hrtime_t t, char *timebuf) } static nvlist_t * -make_random_props() +make_random_props(void) { nvlist_t *props; @@ -5372,8 +5433,8 @@ ztest_init(ztest_shared_t *zs) spa_t *spa; nvlist_t *nvroot, *props; - VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0); - VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0); + mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL); + rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL); kernel_init(FREAD | FWRITE); @@ -5401,6 +5462,9 @@ ztest_init(ztest_shared_t *zs) ztest_freeze(zs); ztest_run_zdb(zs->zs_pool); + + (void) rw_destroy(&zs->zs_name_lock); + (void) mutex_destroy(&zs->zs_vdev_lock); } int @@ -5414,6 +5478,7 @@ main(int argc, char **argv) char timebuf[100]; char numbuf[6]; spa_t *spa; + int i, f; (void) setvbuf(stdout, NULL, _IOLBF, 0); @@ -5422,7 +5487,8 @@ main(int argc, char **argv) process_options(argc, argv); /* Override location of zpool.cache */ - (void) asprintf((char **)&spa_config_path, "%s/zpool.cache", zopt_dir); + VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache", + zopt_dir) != -1); /* * Blow away any existing copy of zpool.cache @@ -5446,7 +5512,7 @@ main(int argc, char **argv) /* * Create and initialize our storage pool. */ - for (int i = 1; i <= zopt_init; i++) { + for (i = 1; i <= zopt_init; i++) { bzero(zs, sizeof (ztest_shared_t)); if (zopt_verbose >= 3 && zopt_init != 1) (void) printf("ztest_init(), pass %d\n", i); @@ -5458,7 +5524,7 @@ main(int argc, char **argv) zs->zs_proc_start = gethrtime(); zs->zs_proc_stop = zs->zs_proc_start + zopt_time * NANOSEC; - for (int f = 0; f < ZTEST_FUNCS; f++) { + for (f = 0; f < ZTEST_FUNCS; f++) { zi = &zs->zs_info[f]; *zi = ztest_info[f]; if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop) @@ -5480,7 +5546,7 @@ main(int argc, char **argv) /* * Initialize the workload counters for each function. 
*/ - for (int f = 0; f < ZTEST_FUNCS; f++) { + for (f = 0; f < ZTEST_FUNCS; f++) { zi = &zs->zs_info[f]; zi->zi_call_count = 0; zi->zi_call_time = 0; @@ -5552,7 +5618,7 @@ main(int argc, char **argv) "Calls", "Time", "Function"); (void) printf("%7s %9s %s\n", "-----", "----", "--------"); - for (int f = 0; f < ZTEST_FUNCS; f++) { + for (f = 0; f < ZTEST_FUNCS; f++) { Dl_info dli; zi = &zs->zs_info[f];
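
Note on the locking conversion above: most of the churn in this patch replaces ztest's Solaris-libc primitives (mutex_t/_mutex_init/mutex_lock, cond_t/cond_wait, rwlock_t/rw_rdlock, thr_create/thr_join) with the kernel-style kmutex_t, kcondvar_t, krwlock_t and thread_create()/thread_join() API that the userland libzpool compatibility layer supplies. As a minimal sketch of what such a shim has to provide -- this is not the actual libzpool implementation; the struct layouts, the VERIFY0 macro, and the demo main() are illustrative assumptions -- kmutex_t and kcondvar_t can be layered over pthreads roughly like this:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Evaluate x even when NDEBUG compiles assert() out. */
#define	VERIFY0(x)	do { int _rc = (x); assert(_rc == 0); (void) _rc; } while (0)

typedef struct kmutex {
	pthread_mutex_t	km_lock;
	pthread_t	km_owner;	/* valid only while km_owned != 0 */
	int		km_owned;
} kmutex_t;

typedef struct kcondvar {
	pthread_cond_t	kc_cv;
} kcondvar_t;

/* The name/type/arg parameters only matter in the kernel; ignore them here. */
static void
mutex_init(kmutex_t *mp, void *name, int type, void *arg)
{
	(void) name; (void) type; (void) arg;
	VERIFY0(pthread_mutex_init(&mp->km_lock, NULL));
	mp->km_owned = 0;
}

static void
mutex_enter(kmutex_t *mp)
{
	VERIFY0(pthread_mutex_lock(&mp->km_lock));
	mp->km_owner = pthread_self();
	mp->km_owned = 1;
}

static void
mutex_exit(kmutex_t *mp)
{
	mp->km_owned = 0;
	VERIFY0(pthread_mutex_unlock(&mp->km_lock));
}

/* Needed because ASSERT(mutex_held(...)) replaces ASSERT(_mutex_held(...)). */
static int
mutex_held(kmutex_t *mp)
{
	return (mp->km_owned && pthread_equal(mp->km_owner, pthread_self()));
}

static void
cv_init(kcondvar_t *cvp, void *name, int type, void *arg)
{
	(void) name; (void) type; (void) arg;
	VERIFY0(pthread_cond_init(&cvp->kc_cv, NULL));
}

static void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	mp->km_owned = 0;			/* the lock is released while waiting */
	VERIFY0(pthread_cond_wait(&cvp->kc_cv, &mp->km_lock));
	mp->km_owner = pthread_self();		/* and reacquired on return */
	mp->km_owned = 1;
}

static void
cv_broadcast(kcondvar_t *cvp)
{
	VERIFY0(pthread_cond_broadcast(&cvp->kc_cv));
}

int
main(void)
{
	kmutex_t lock;
	kcondvar_t cv;

	mutex_init(&lock, NULL, 0, NULL);
	cv_init(&cv, NULL, 0, NULL);

	mutex_enter(&lock);
	assert(mutex_held(&lock));
	cv_broadcast(&cv);		/* no waiters; just exercises the call */
	mutex_exit(&lock);
	assert(!mutex_held(&lock));

	(void) printf("kmutex/kcondvar sketch ok\n");
	return (0);
}

The owner bookkeeping is the main reason a bare pthread_mutex_t is not enough: the patch keeps asserting lock ownership via mutex_held(), and POSIX has no portable "does the calling thread hold this mutex" query, so the shim has to track ownership itself. The krwlock_t and thread_create()/thread_join() conversions presumably follow the same pattern over pthread_rwlock_t and pthread_create().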